From a5d7400232d67643fa7e1695538ea31b21e55375 Mon Sep 17 00:00:00 2001
From: Pires <1752631+pires@users.noreply.github.com>
Date: Thu, 28 Feb 2019 23:04:13 +0000
Subject: [PATCH] providers: remove hypersh provider (#524)

Hyper.sh was shut down on Jan 16, 2019.
https://blog.hyper.sh/close_hyper_sh.html

Signed-off-by: Paulo Pires
---
 CONTRIBUTING.md | 4 -
 Gopkg.lock | 143 +-
 Gopkg.toml | 21 -
 ISSUE_TEMPLATE.md | 2 +-
 Makefile | 2 +-
 README.md | 13 +-
 providers/hypersh/README.md | 124 -
 providers/hypersh/hypersh.go | 457 ---
 providers/hypersh/util.go | 419 ---
 providers/register/provider_hyper.go | 16 -
 .../candiedyaml/LICENSE | 201 -
 .../cloudfoundry-incubator/candiedyaml/NOTICE | 18 -
 .../cloudfoundry-incubator/candiedyaml/api.go | 834 -----
 .../candiedyaml/decode.go | 622 ---
 .../candiedyaml/emitter.go | 2072 ----------
 .../candiedyaml/encode.go | 395 --
 .../candiedyaml/parser.go | 1230 ------
 .../candiedyaml/reader.go | 465 ---
 .../candiedyaml/resolver.go | 449 ---
 .../candiedyaml/run_parser.go | 62 -
 .../candiedyaml/scanner.go | 3318 -----------------
 .../candiedyaml/tags.go | 360 --
 .../candiedyaml/writer.go | 128 -
 .../candiedyaml/yaml_definesh.go | 22 -
 .../candiedyaml/yaml_privateh.go | 891 -----
 .../candiedyaml/yamlh.go | 953 -----
 .../registry/api/errcode/errors.go | 267 --
 .../registry/api/errcode/handler.go | 44 -
 .../registry/api/errcode/register.go | 138 -
 .../registry/api/v2/descriptors.go | 1596 --------
 .../distribution/registry/api/v2/doc.go | 9 -
 .../distribution/registry/api/v2/errors.go | 136 -
 .../registry/api/v2/headerparser.go | 161 -
 .../distribution/registry/api/v2/routes.go | 49 -
 .../distribution/registry/api/v2/urls.go | 263 --
 .../registry/client/auth/challenge/addr.go | 27 -
 .../client/auth/challenge/authchallenge.go | 237 --
 .../registry/client/blob_writer.go | 162 -
 .../distribution/registry/client/errors.go | 139 -
 .../registry/client/repository.go | 853 -----
 .../registry/client/transport/http_reader.go | 251 --
 .../registry/client/transport/transport.go | 147 -
 .../registry/storage/cache/cache.go | 35 -
 .../cache/cachedblobdescriptorstore.go | 101 -
 .../registry/storage/cache/memory/memory.go | 179 -
 vendor/github.com/docker/engine-api/LICENSE | 191 -
 .../client/transport/cancellable/LICENSE | 27 -
 .../engine-api/types/strslice/strslice.go | 30 -
 vendor/github.com/flynn/go-shlex/COPYING | 202 -
 vendor/github.com/flynn/go-shlex/shlex.go | 457 ---
 vendor/github.com/hyperhq/hyper-api/LICENSE | 191 -
 .../hyper-api/client/checkpoint_create.go | 14 -
 .../hyper-api/client/checkpoint_delete.go | 10 -
 .../hyper-api/client/checkpoint_list.go | 22 -
 .../hyperhq/hyper-api/client/client.go | 153 -
 .../hyperhq/hyper-api/client/client_darwin.go | 4 -
 .../hyperhq/hyper-api/client/client_unix.go | 6 -
 .../hyper-api/client/client_windows.go | 4 -
 .../hyperhq/hyper-api/client/compose.go | 149 -
 .../hyper-api/client/container_attach.go | 34 -
 .../hyper-api/client/container_commit.go | 53 -
 .../hyper-api/client/container_copy.go | 96 -
 .../hyper-api/client/container_create.go | 46 -
 .../hyper-api/client/container_diff.go | 23 -
 .../hyper-api/client/container_exec.go | 49 -
 .../hyper-api/client/container_export.go | 20 -
 .../hyper-api/client/container_inspect.go | 54 -
 .../hyper-api/client/container_kill.go | 16 -
 .../hyper-api/client/container_list.go | 56 -
 .../hyper-api/client/container_logs.go | 51 -
 .../hyper-api/client/container_pause.go | 10 -
 .../hyper-api/client/container_remove.go | 32 -
 .../hyper-api/client/container_rename.go | 16 -
 .../hyper-api/client/container_resize.go | 29 -
 .../hyper-api/client/container_restart.go | 19 -
 .../hyper-api/client/container_start.go | 17 -
 .../hyper-api/client/container_stats.go | 24 -
 .../hyper-api/client/container_stop.go | 18 -
 .../hyperhq/hyper-api/client/container_top.go | 28 -
 .../hyper-api/client/container_unpause.go | 10 -
 .../hyper-api/client/container_update.go | 10 -
 .../hyper-api/client/container_wait.go | 26 -
 .../hyperhq/hyper-api/client/cron.go | 106 -
 .../hyperhq/hyper-api/client/errors.go | 165 -
 .../hyperhq/hyper-api/client/events.go | 48 -
 .../hyperhq/hyper-api/client/fip.go | 90 -
 .../hyperhq/hyper-api/client/func.go | 283 --
 .../hyperhq/hyper-api/client/hijack.go | 176 -
 .../hyperhq/hyper-api/client/image_build.go | 119 -
 .../hyperhq/hyper-api/client/image_create.go | 34 -
 .../hyperhq/hyper-api/client/image_history.go | 22 -
 .../hyperhq/hyper-api/client/image_import.go | 37 -
 .../hyperhq/hyper-api/client/image_inspect.go | 38 -
 .../hyperhq/hyper-api/client/image_list.go | 40 -
 .../hyperhq/hyper-api/client/image_load.go | 22 -
 .../hyper-api/client/image_load_local.go | 83 -
 .../hyperhq/hyper-api/client/image_pull.go | 46 -
 .../hyperhq/hyper-api/client/image_push.go | 54 -
 .../hyperhq/hyper-api/client/image_remove.go | 31 -
 .../hyperhq/hyper-api/client/image_save.go | 21 -
 .../hyperhq/hyper-api/client/image_search.go | 49 -
 .../hyperhq/hyper-api/client/image_tag.go | 37 -
 .../hyperhq/hyper-api/client/info.go | 26 -
 .../hyperhq/hyper-api/client/interface.go | 139 -
 .../hyperhq/hyper-api/client/login.go | 28 -
 .../hyper-api/client/network_connect.go | 18 -
 .../hyper-api/client/network_create.go | 25 -
 .../hyper-api/client/network_disconnect.go | 14 -
 .../hyper-api/client/network_inspect.go | 38 -
 .../hyperhq/hyper-api/client/network_list.go | 31 -
 .../hyper-api/client/network_remove.go | 10 -
 .../hyperhq/hyper-api/client/request.go | 195 -
 .../hyperhq/hyper-api/client/service.go | 95 -
 .../github.com/hyperhq/hyper-api/client/sg.go | 69 -
 .../hyperhq/hyper-api/client/snapshot.go | 73 -
 .../client/transport/cancellable/LICENSE | 27 -
 .../client/transport/cancellable/canceler.go | 23 -
 .../transport/cancellable/canceler_go14.go | 27 -
 .../transport/cancellable/cancellable.go | 113 -
 .../hyper-api/client/transport/client.go | 47 -
 .../client/transport/tlsconfig_clone.go | 11 -
 .../client/transport/tlsconfig_clone_go17.go | 33 -
 .../hyper-api/client/transport/transport.go | 57 -
 .../hyperhq/hyper-api/client/version.go | 21 -
 .../hyperhq/hyper-api/client/volume_create.go | 20 -
 .../hyperhq/hyper-api/client/volume_init.go | 39 -
 .../hyper-api/client/volume_inspect.go | 38 -
 .../hyperhq/hyper-api/client/volume_list.go | 32 -
 .../hyperhq/hyper-api/client/volume_remove.go | 10 -
 .../hyperhq/hyper-api/signature/sign.go | 107 -
 .../hyperhq/hyper-api/signature/sign4.go | 365 --
 .../hyperhq/hyper-api/types/auth.go | 22 -
 .../hyperhq/hyper-api/types/blkiodev/blkio.go | 23 -
 .../hyperhq/hyper-api/types/client.go | 249 --
 .../hyperhq/hyper-api/types/configs.go | 53 -
 .../hyper-api/types/container/config.go | 37 -
 .../hyper-api/types/container/host_config.go | 319 --
 .../hyper-api/types/container/hostconfig.go | 79 -
 .../hyperhq/hyper-api/types/cron.go | 67 -
 .../hyperhq/hyper-api/types/errors.go | 6 -
 .../hyperhq/hyper-api/types/filters/parse.go | 307 --
 .../hyperhq/hyper-api/types/func.go | 84 -
 .../hyper-api/types/network/network.go | 52 -
 .../hyperhq/hyper-api/types/plugin.go | 170 -
 .../types/reference/image_reference.go | 34 -
 .../hyper-api/types/registry/registry.go | 99 -
 .../hyperhq/hyper-api/types/seccomp.go | 68 -
 .../hyperhq/hyper-api/types/service.go | 61 -
 .../hyperhq/hyper-api/types/stats.go | 115 -
 .../hyper-api/types/strslice/strslice.go | 30 -
 .../hyper-api/types/time/duration_convert.go | 12 -
 .../hyperhq/hyper-api/types/time/timestamp.go | 124 -
 .../hyperhq/hyper-api/types/types.go | 562 ---
 .../hyper-api/types/versions/compare.go | 62 -
 vendor/github.com/hyperhq/hypercli/AUTHORS | 1033 -----
 vendor/github.com/hyperhq/hypercli/LICENSE | 191 -
 vendor/github.com/hyperhq/hypercli/NOTICE | 8 -
 .../hyperhq/hypercli/cliconfig/config.go | 292 --
 .../contrib/docker-engine-selinux/LICENSE | 340 --
 .../hypercli/contrib/syntax/vim/LICENSE | 22 -
 .../hypercli/daemon/graphdriver/driver.go | 218 --
 .../daemon/graphdriver/driver_freebsd.go | 8 -
 .../daemon/graphdriver/driver_linux.go | 88 -
 .../daemon/graphdriver/driver_unsupported.go | 15 -
 .../daemon/graphdriver/driver_windows.go | 16 -
 .../hypercli/daemon/graphdriver/fsdiff.go | 162 -
 .../hypercli/daemon/graphdriver/plugin.go | 32 -
 .../daemon/graphdriver/plugin_unsupported.go | 7 -
 .../hypercli/daemon/graphdriver/proxy.go | 210 --
 .../docs/static_files/contributors.png | Bin 23100 -> 0 bytes
 .../hyperhq/hypercli/hack/generate-authors.sh | 15 -
 .../github.com/hyperhq/hypercli/image/fs.go | 184 -
 .../hyperhq/hypercli/image/image.go | 138 -
 .../hyperhq/hypercli/image/rootfs.go | 8 -
 .../hyperhq/hypercli/image/rootfs_unix.go | 23 -
 .../hyperhq/hypercli/image/rootfs_windows.go | 37 -
 .../hyperhq/hypercli/image/store.go | 289 --
 .../hyperhq/hypercli/image/v1/imagev1.go | 148 -
 .../hyperhq/hypercli/layer/empty.go | 48 -
 .../hyperhq/hypercli/layer/filestore.go | 326 --
 .../hyperhq/hypercli/layer/layer.go | 256 --
 .../hyperhq/hypercli/layer/layer_store.go | 626 ----
 .../hyperhq/hypercli/layer/layer_unix.go | 9 -
 .../hyperhq/hypercli/layer/layer_windows.go | 98 -
 .../hyperhq/hypercli/layer/migration.go | 255 --
 .../hyperhq/hypercli/layer/mounted_layer.go | 144 -
 .../hyperhq/hypercli/layer/ro_layer.go | 119 -
 .../github.com/hyperhq/hypercli/opts/hosts.go | 148 -
 .../hyperhq/hypercli/opts/hosts_unix.go | 8 -
 .../hyperhq/hypercli/opts/hosts_windows.go | 6 -
 vendor/github.com/hyperhq/hypercli/opts/ip.go | 42 -
 .../github.com/hyperhq/hypercli/opts/opts.go | 242 --
 .../hyperhq/hypercli/opts/opts_unix.go | 6 -
 .../hyperhq/hypercli/opts/opts_windows.go | 56 -
 .../hyperhq/hypercli/pkg/archive/archive.go | 1049 ------
 .../hypercli/pkg/archive/archive_unix.go | 112 -
 .../hypercli/pkg/archive/archive_windows.go | 70 -
 .../hyperhq/hypercli/pkg/archive/changes.go | 416 ---
 .../hypercli/pkg/archive/changes_linux.go | 285 --
 .../hypercli/pkg/archive/changes_other.go | 97 -
 .../hypercli/pkg/archive/changes_unix.go | 36 -
 .../hypercli/pkg/archive/changes_windows.go | 30 -
 .../hyperhq/hypercli/pkg/archive/copy.go | 458 ---
 .../hyperhq/hypercli/pkg/archive/copy_unix.go | 11 -
 .../hypercli/pkg/archive/copy_windows.go | 9 -
 .../hyperhq/hypercli/pkg/archive/diff.go | 279 --
 .../hypercli/pkg/archive/example_changes.go | 97 -
 .../hypercli/pkg/archive/time_linux.go | 16 -
 .../hypercli/pkg/archive/time_unsupported.go | 16 -
 .../hyperhq/hypercli/pkg/archive/whiteouts.go | 23 -
 .../hyperhq/hypercli/pkg/archive/wrap.go | 59 -
 .../hypercli/pkg/chrootarchive/archive.go | 97 -
 .../pkg/chrootarchive/archive_unix.go | 91 -
 .../pkg/chrootarchive/archive_windows.go | 22 -
 .../hypercli/pkg/chrootarchive/diff.go | 19 -
 .../hypercli/pkg/chrootarchive/diff_unix.go | 118 -
 .../pkg/chrootarchive/diff_windows.go | 44 -
 .../hypercli/pkg/chrootarchive/init_unix.go | 28 -
 .../pkg/chrootarchive/init_windows.go | 4 -
 .../hypercli/pkg/fileutils/fileutils.go | 279 --
 .../hypercli/pkg/fileutils/fileutils_unix.go | 22 -
 .../pkg/fileutils/fileutils_windows.go | 7 -
 .../hyperhq/hypercli/pkg/homedir/homedir.go | 39 -
 .../hypercli/pkg/httputils/httputils.go | 56 -
 .../hypercli/pkg/httputils/mimetype.go | 30 -
 .../pkg/httputils/resumablerequestreader.go | 95 -
 .../hyperhq/hypercli/pkg/idtools/idtools.go | 195 -
 .../hypercli/pkg/idtools/idtools_unix.go | 60 -
 .../hypercli/pkg/idtools/idtools_windows.go | 18 -
 .../pkg/idtools/usergroupadd_linux.go | 155 -
 .../pkg/idtools/usergroupadd_unsupported.go | 12 -
 .../hyperhq/hypercli/pkg/ioutils/bytespipe.go | 152 -
 .../hyperhq/hypercli/pkg/ioutils/fmt.go | 22 -
 .../hypercli/pkg/ioutils/multireader.go | 226 --
 .../hyperhq/hypercli/pkg/ioutils/readers.go | 154 -
 .../hyperhq/hypercli/pkg/ioutils/scheduler.go | 6 -
 .../hypercli/pkg/ioutils/scheduler_gccgo.go | 13 -
 .../hyperhq/hypercli/pkg/ioutils/temp_unix.go | 10 -
 .../hypercli/pkg/ioutils/temp_windows.go | 18 -
 .../hypercli/pkg/ioutils/writeflusher.go | 92 -
 .../hyperhq/hypercli/pkg/ioutils/writers.go | 66 -
 .../hyperhq/hypercli/pkg/jsonlog/jsonlog.go | 40 -
 .../pkg/jsonlog/jsonlog_marshalling.go | 180 -
 .../hypercli/pkg/jsonlog/jsonlogbytes.go | 124 -
 .../hypercli/pkg/jsonlog/time_marshalling.go | 27 -
 .../hypercli/pkg/jsonmessage/jsonmessage.go | 221 --
 .../hyperhq/hypercli/pkg/longpath/longpath.go | 26 -
 .../hyperhq/hypercli/pkg/mflag/LICENSE | 27 -
 .../hyperhq/hypercli/pkg/mflag/flag.go | 1280 -------
 .../hyperhq/hypercli/pkg/plugins/client.go | 162 -
 .../hyperhq/hypercli/pkg/plugins/discovery.go | 130 -
 .../hyperhq/hypercli/pkg/plugins/errors.go | 33 -
 .../hyperhq/hypercli/pkg/plugins/plugins.go | 222 --
 .../hyperhq/hypercli/pkg/pools/pools.go | 119 -
 .../hyperhq/hypercli/pkg/promise/promise.go | 11 -
 .../hyperhq/hypercli/pkg/random/random.go | 71 -
 .../hypercli/pkg/reexec/command_freebsd.go | 23 -
 .../hypercli/pkg/reexec/command_linux.go | 28 -
 .../pkg/reexec/command_unsupported.go | 12 -
 .../hypercli/pkg/reexec/command_windows.go | 23 -
 .../hyperhq/hypercli/pkg/reexec/reexec.go | 47 -
 .../hypercli/pkg/selfupdate/LICENSE.md | 21 -
 .../hyperhq/hypercli/pkg/stringid/stringid.go | 71 -
 .../hypercli/pkg/symlink/LICENSE.APACHE | 191 -
 .../hyperhq/hypercli/pkg/symlink/LICENSE.BSD | 27 -
 .../hyperhq/hypercli/pkg/system/chtimes.go | 47 -
 .../hyperhq/hypercli/pkg/system/errors.go | 10 -
 .../hypercli/pkg/system/events_windows.go | 83 -
 .../hyperhq/hypercli/pkg/system/filesys.go | 19 -
 .../hypercli/pkg/system/filesys_windows.go | 82 -
 .../hyperhq/hypercli/pkg/system/lstat.go | 19 -
 .../hypercli/pkg/system/lstat_windows.go | 25 -
 .../hyperhq/hypercli/pkg/system/meminfo.go | 17 -
 .../hypercli/pkg/system/meminfo_linux.go | 66 -
 .../pkg/system/meminfo_unsupported.go | 8 -
 .../hypercli/pkg/system/meminfo_windows.go | 44 -
 .../hyperhq/hypercli/pkg/system/mknod.go | 22 -
 .../hypercli/pkg/system/mknod_windows.go | 13 -
 .../hyperhq/hypercli/pkg/system/path_unix.go | 8 -
 .../hypercli/pkg/system/path_windows.go | 7 -
 .../hyperhq/hypercli/pkg/system/stat.go | 53 -
 .../hypercli/pkg/system/stat_freebsd.go | 27 -
 .../hyperhq/hypercli/pkg/system/stat_linux.go | 33 -
 .../hypercli/pkg/system/stat_solaris.go | 17 -
 .../hypercli/pkg/system/stat_unsupported.go | 17 -
 .../hypercli/pkg/system/stat_windows.go | 43 -
 .../hypercli/pkg/system/syscall_unix.go | 11 -
 .../hypercli/pkg/system/syscall_windows.go | 36 -
 .../hyperhq/hypercli/pkg/system/umask.go | 13 -
 .../hypercli/pkg/system/umask_windows.go | 9 -
 .../hypercli/pkg/system/utimes_darwin.go | 8 -
 .../hypercli/pkg/system/utimes_freebsd.go | 22 -
 .../hypercli/pkg/system/utimes_linux.go | 26 -
 .../hypercli/pkg/system/utimes_unsupported.go | 10 -
 .../hypercli/pkg/system/xattrs_linux.go | 63 -
 .../hypercli/pkg/system/xattrs_unsupported.go | 13 -
 .../hypercli/pkg/tarsum/builder_context.go | 21 -
 .../hypercli/pkg/tarsum/fileinfosums.go | 126 -
 .../hyperhq/hypercli/pkg/tarsum/tarsum.go | 294 --
 .../hyperhq/hypercli/pkg/tarsum/versioning.go | 150 -
 .../hypercli/pkg/tarsum/writercloser.go | 22 -
 .../hyperhq/hypercli/pkg/term/ascii.go | 66 -
 .../hyperhq/hypercli/pkg/term/tc_linux_cgo.go | 51 -
 .../hyperhq/hypercli/pkg/term/tc_other.go | 19 -
 .../hyperhq/hypercli/pkg/term/term.go | 132 -
 .../hyperhq/hypercli/pkg/term/term_windows.go | 305 --
 .../hypercli/pkg/term/termios_darwin.go | 69 -
 .../hypercli/pkg/term/termios_freebsd.go | 69 -
 .../hypercli/pkg/term/termios_linux.go | 47 -
 .../hypercli/pkg/term/windows/ansi_reader.go | 257 --
 .../hypercli/pkg/term/windows/ansi_writer.go | 76 -
 .../hypercli/pkg/term/windows/console.go | 97 -
 .../hypercli/pkg/term/windows/windows.go | 5 -
 .../hyperhq/hypercli/pkg/urlutil/urlutil.go | 50 -
 .../hyperhq/hypercli/pkg/version/version.go | 68 -
 .../hyperhq/hypercli/project/CONTRIBUTORS.md | 434 ---
 .../hyperhq/hypercli/reference/reference.go | 191 -
 .../hyperhq/hypercli/reference/store.go | 298 --
 .../hyperhq/hypercli/registry/auth.go | 255 --
 .../hypercli/registry/authchallenge.go | 150 -
 .../hyperhq/hypercli/registry/config.go | 257 --
 .../hyperhq/hypercli/registry/config_unix.go | 24 -
 .../hypercli/registry/config_windows.go | 30 -
 .../hyperhq/hypercli/registry/endpoint.go | 277 --
 .../hyperhq/hypercli/registry/reference.go | 68 -
 .../hyperhq/hypercli/registry/registry.go | 236 --
 .../hyperhq/hypercli/registry/service.go | 188 -
 .../hyperhq/hypercli/registry/service_v1.go | 56 -
 .../hyperhq/hypercli/registry/service_v2.go | 74 -
 .../hyperhq/hypercli/registry/session.go | 770 ----
 .../hyperhq/hypercli/registry/token.go | 81 -
 .../hyperhq/hypercli/registry/types.go | 70 -
 .../hyperhq/libcompose/config/convert.go | 43 -
 .../hyperhq/libcompose/config/hash.go | 95 -
 .../libcompose/config/interpolation.go | 169 -
 .../hyperhq/libcompose/config/merge.go | 150 -
 .../hyperhq/libcompose/config/merge_v1.go | 173 -
 .../hyperhq/libcompose/config/merge_v2.go | 211 --
 .../hyperhq/libcompose/config/schema.go | 339 --
 .../libcompose/config/schema_helpers.go | 96 -
 .../hyperhq/libcompose/config/types.go | 238 --
 .../hyperhq/libcompose/config/utils.go | 42 -
 .../hyperhq/libcompose/config/validation.go | 309 --
 .../hyperhq/libcompose/utils/util.go | 136 -
 .../hyperhq/libcompose/yaml/types_yaml.go | 288 --
 .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 -
 .../xeipuuv/gojsonpointer/pointer.go | 211 --
 .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 -
 .../xeipuuv/gojsonreference/reference.go | 147 -
 .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 -
 .../github.com/xeipuuv/gojsonschema/errors.go | 283 --
 .../xeipuuv/gojsonschema/format_checkers.go | 203 -
 .../xeipuuv/gojsonschema/internalLog.go | 37 -
 .../xeipuuv/gojsonschema/jsonContext.go | 72 -
 .../xeipuuv/gojsonschema/jsonLoader.go | 341 --
 .../json_schema_test_suite/LICENSE | 19 -
 .../xeipuuv/gojsonschema/locales.go | 286 --
 .../github.com/xeipuuv/gojsonschema/result.go | 172 -
 .../github.com/xeipuuv/gojsonschema/schema.go | 933 -----
 .../xeipuuv/gojsonschema/schemaPool.go | 109 -
 .../gojsonschema/schemaReferencePool.go | 67 -
 .../xeipuuv/gojsonschema/schemaType.go | 83 -
 .../xeipuuv/gojsonschema/subSchema.go | 227 --
 .../github.com/xeipuuv/gojsonschema/types.go | 58 -
 .../github.com/xeipuuv/gojsonschema/utils.go | 208 --
 .../xeipuuv/gojsonschema/validation.go | 832 -----
 website/data/providers.yaml | 2 -
 367 files changed, 4 insertions(+), 53716 deletions(-)
 delete mode 100644 providers/hypersh/README.md
 delete mode 100755 providers/hypersh/hypersh.go
 delete mode 100644 providers/hypersh/util.go
 delete mode 100644 providers/register/provider_hyper.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/NOTICE
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
 delete mode 100644 vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/doc.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/errors.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/routes.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/api/v2/urls.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/blob_writer.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/errors.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/repository.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/client/transport/transport.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cache.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
 delete mode 100644 vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
 delete mode 100644 vendor/github.com/docker/engine-api/LICENSE
 delete mode 100644 vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE
 delete mode 100644 vendor/github.com/docker/engine-api/types/strslice/strslice.go
 delete mode 100644 vendor/github.com/flynn/go-shlex/COPYING
 delete mode 100644 vendor/github.com/flynn/go-shlex/shlex.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/checkpoint_create.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/checkpoint_delete.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/checkpoint_list.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/client.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/client_darwin.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/client_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/client_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/compose.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_attach.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_commit.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_copy.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_create.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_diff.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_exec.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_export.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_inspect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_kill.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_list.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_logs.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_pause.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_remove.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_rename.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_resize.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_restart.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_start.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_stats.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_stop.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_top.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_unpause.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_update.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/container_wait.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/cron.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/errors.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/events.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/fip.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/func.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/hijack.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_build.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_create.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_history.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_import.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_inspect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_list.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_load.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_load_local.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_pull.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_push.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_remove.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_save.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_search.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/image_tag.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/info.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/interface.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/login.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_connect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_create.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_disconnect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_inspect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_list.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/network_remove.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/request.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/service.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/sg.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/snapshot.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler_go14.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/cancellable.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/client.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone_go17.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/transport/transport.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/version.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/volume_create.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/volume_init.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/volume_inspect.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/volume_list.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/client/volume_remove.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/signature/sign.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/signature/sign4.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/auth.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/blkiodev/blkio.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/client.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/configs.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/container/config.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/container/host_config.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/container/hostconfig.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/cron.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/errors.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/filters/parse.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/func.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/network/network.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/plugin.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/reference/image_reference.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/registry/registry.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/seccomp.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/service.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/stats.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/strslice/strslice.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/time/duration_convert.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/time/timestamp.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/types.go
 delete mode 100644 vendor/github.com/hyperhq/hyper-api/types/versions/compare.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/AUTHORS
 delete mode 100644 vendor/github.com/hyperhq/hypercli/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/NOTICE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/cliconfig/config.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/contrib/docker-engine-selinux/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/contrib/syntax/vim/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_freebsd.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/fsdiff.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/daemon/graphdriver/proxy.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/docs/static_files/contributors.png
 delete mode 100755 vendor/github.com/hyperhq/hypercli/hack/generate-authors.sh
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/fs.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/image.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/rootfs.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/rootfs_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/rootfs_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/store.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/image/v1/imagev1.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/empty.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/filestore.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/layer.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/layer_store.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/layer_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/layer_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/migration.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/mounted_layer.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/layer/ro_layer.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/hosts.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/hosts_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/hosts_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/ip.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/opts.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/opts_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/opts/opts_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/archive.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/archive_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/archive_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/changes.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/changes_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/changes_other.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/changes_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/changes_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/copy.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/copy_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/copy_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/diff.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/example_changes.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/time_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/time_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/whiteouts.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/archive/wrap.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/homedir/homedir.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/httputils/httputils.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/httputils/mimetype.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/httputils/resumablerequestreader.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/bytespipe.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/fmt.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/multireader.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/readers.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler_gccgo.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/writeflusher.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/ioutils/writers.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog_marshalling.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlogbytes.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/jsonlog/time_marshalling.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/jsonmessage/jsonmessage.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/longpath/longpath.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/mflag/LICENSE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/mflag/flag.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/plugins/client.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/plugins/discovery.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/plugins/errors.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/plugins/plugins.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/pools/pools.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/promise/promise.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/random/random.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/reexec/command_freebsd.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/reexec/command_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/reexec/command_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/reexec/command_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/reexec/reexec.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/selfupdate/LICENSE.md
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/stringid/stringid.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.APACHE
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.BSD
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/chtimes.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/errors.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/events_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/filesys.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/filesys_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/lstat.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/lstat_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/meminfo.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/mknod.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/mknod_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/path_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/path_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat_freebsd.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat_solaris.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/stat_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/syscall_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/syscall_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/umask.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/umask_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/utimes_darwin.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/utimes_freebsd.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/utimes_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/utimes_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_unsupported.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/tarsum/builder_context.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/tarsum/fileinfosums.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/tarsum/tarsum.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/tarsum/versioning.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/tarsum/writercloser.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/ascii.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/tc_linux_cgo.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/tc_other.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/term.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/term_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/termios_darwin.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/termios_freebsd.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/termios_linux.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_reader.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_writer.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/windows/console.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/term/windows/windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/urlutil/urlutil.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/pkg/version/version.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/project/CONTRIBUTORS.md
 delete mode 100644 vendor/github.com/hyperhq/hypercli/reference/reference.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/reference/store.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/auth.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/authchallenge.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/config.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/config_unix.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/config_windows.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/endpoint.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/reference.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/registry.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/service.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/service_v1.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/service_v2.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/session.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/token.go
 delete mode 100644 vendor/github.com/hyperhq/hypercli/registry/types.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/convert.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/hash.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/interpolation.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/merge.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/merge_v1.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/merge_v2.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/schema.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/schema_helpers.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/types.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/utils.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/config/validation.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/utils/util.go
 delete mode 100644 vendor/github.com/hyperhq/libcompose/yaml/types_yaml.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt
 delete mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
 delete mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go
 delete mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index aa86bc1cd..91d3ca4b6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -43,10 +43,6 @@ Robbie Zhang (junjiez@microsoft.com)
 
 Onur Filiz (onfiliz@amazon.com)
 
-**Hyper.sh**
-
-Harry Zhang (harryzhang@zju.edu.cn)
-
 **Alibaba Cloud**
 
 (TBA)
diff --git a/Gopkg.lock b/Gopkg.lock
index a076e6604..38afe8685 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -198,14 +198,6 @@
   revision = "24333298e36590ea0716598caacc8959fc393c48"
   version = "v0.0.2"
 
-[[projects]]
-  branch = "master"
-  digest = "1:dadd2829b5206df1b19eca11995ff94f8a0e1164ed0c54ff7335000aa08840cb"
-  name = "github.com/cloudfoundry-incubator/candiedyaml"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "a41693b7b7afb422c7ecb1028458ab27da047bbb"
-
 [[projects]]
   digest = "1:62e5b997b5ada9b5f71e759c3474f2a0de8de1b21473bab9e4f98c5aa69c05eb"
   name = "github.com/cpuguy83/strongerrors"
@@ -243,20 +235,13 @@
   revision = "6c6132ff69f0f6c088739067407b5d32c52e1d0f"
 
 [[projects]]
-  digest = "1:42656cdee7408cbc2209cbe071acd8e17cae4f06d6c3830422ebd5d65d845acc"
+  digest = "1:182a67996c78d0ee343dbba889378c35513d428ac31432cd0cf3c9e902abc3c4"
   name = "github.com/docker/distribution"
   packages = [
     ".",
     "context",
     "digest",
     "reference",
-    "registry/api/errcode",
-    "registry/api/v2",
-    "registry/client",
-    "registry/client/auth/challenge",
-    "registry/client/transport",
-    "registry/storage/cache",
-    "registry/storage/cache/memory",
     "uuid",
   ]
   pruneopts = "NUT"
@@ -314,14 +299,6 @@
   pruneopts = "NUT"
   revision = "49bf474f9ed7ce7143a59d1964ff7b7fd9b52178"
 
-[[projects]]
-  digest = "1:dcfbd4e9931536ef74fa509eaa0cf905980b527a76ad9d5e52f9dd38a1c9793c"
-  name = "github.com/docker/engine-api"
-  packages = ["types/strslice"]
-  pruneopts = "NUT"
-  revision = "3d1601b9d2436a70b0dfc045a23f6503d19195df"
-  version = "v0.4.0"
-
 [[projects]]
   digest = "1:f133477f38c590bdcd6fc534617df17983f7a21e5b686d4a3495abeb21c631ec"
   name = "github.com/docker/go-connections"
@@ -372,14 +349,6 @@
   revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
   version = "v4.1.0"
 
-[[projects]]
-  branch = "master"
-  digest = "1:1ccd7321e62f680a988bba496f0f5a9c80410b8104d55b0f6b8ecf84ad328476"
-  name = "github.com/flynn/go-shlex"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "3f9db97f856818214da2e1057f8ad84803971cff"
-
 [[projects]]
   digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129"
   name = "github.com/fsnotify/fsnotify"
@@ -780,78 +749,6 @@
   revision = "08df121c8b9adcc2b8fd55fc8506c3f9714c7e61"
   version = "v1.0.1"
 
-[[projects]]
-  digest = "1:ce7c38372fe019fffddf72f1201c4e25548fb2594b813c923bf6d04705581de2"
-  name = "github.com/hyperhq/hyper-api"
-  packages = [
-    "client",
-    "client/transport",
-    "client/transport/cancellable",
-    "signature",
-    "types",
-    "types/blkiodev",
-    "types/container",
-    "types/filters",
-    "types/network",
-    "types/reference",
-    "types/registry",
-    "types/strslice",
-    "types/time",
-    "types/versions",
-  ]
-  pruneopts = "NUT"
-  revision = "18c77d3f9fe0abebb41b45c12f383ecac46f4ff1"
-
-[[projects]]
-  digest = "1:127e065c02cbc9fa48f0b798db8a5ed679c8e3434cd4d9cbd7c3d8a1b01d204e"
-  name = "github.com/hyperhq/hypercli"
-  packages = [
-    "cliconfig",
-    "daemon/graphdriver",
-    "image",
-    "image/v1",
-    "layer",
-    "opts",
-    "pkg/archive",
-    "pkg/chrootarchive",
-    "pkg/fileutils",
-    "pkg/homedir",
-    "pkg/httputils",
-    "pkg/idtools",
-    "pkg/ioutils",
-    "pkg/jsonlog",
-    "pkg/jsonmessage",
-    "pkg/longpath",
-    "pkg/mflag",
-    "pkg/plugins",
-    "pkg/pools",
-    "pkg/promise",
-    "pkg/random",
-    "pkg/reexec",
-    "pkg/stringid",
-    "pkg/system",
-    "pkg/tarsum",
-    "pkg/term",
-    "pkg/term/windows",
-    "pkg/urlutil",
-    "pkg/version",
-    "reference",
-    "registry",
-  ]
-  pruneopts = "NUT"
-  revision = "29217d318cab52815518a1126d57ca010de83e4d"
-
-[[projects]]
-  digest = "1:7158b13e667602ac37f7f9e2567feec50f31ebb2cb5cab49d2fb4c036e1e0003"
-  name = "github.com/hyperhq/libcompose"
-  packages = [
-    "config",
-    "utils",
-    "yaml",
-  ]
-  pruneopts = "NUT"
-  revision = "15d3a105140f968f5d4f62d2f44afd22a24a98fb"
-
 [[projects]]
   digest = "1:f0818bc212054788d1086e015b5ba32d01ef8e12c615bbb625570eefbe684a1e"
   name = "github.com/imdario/mergo"
@@ -1305,29 +1202,6 @@
   pruneopts = "NUT"
   revision = "25eff159a728be87e103a0b8045e08273f4dbec4"
 
-[[projects]]
-  branch = "master"
-  digest = "1:f15121220068fb01e71ad08b0fdbd1bfaa926be774e7634e8e332c82134079b0"
-  name = "github.com/xeipuuv/gojsonpointer"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "4e3ac2762d5f479393488629ee9370b50873b3a6"
-
-[[projects]]
-  branch = "master"
-  digest = "1:131db546a264d76defd7a4ce233796316b2ab856991cb4b7d6ced2a3c7294ad3"
-  name = "github.com/xeipuuv/gojsonreference"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "bd5ef7bd5415a7ac448318e64f11a24cd21e594b"
-
-[[projects]]
-  digest = "1:60e24d485a33cb9bffc041de6a5f1596b8ef8d9a9bb9e9f3834c72fd5a96e76a"
-  name = "github.com/xeipuuv/gojsonschema"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
-
 [[projects]]
   digest = "1:a3cb0432171318ddfe4c7cc40c6edf94cb6a61859ffa5cec27f4d5162dfc4db7"
   name = "go.opencensus.io"
@@ -1908,26 +1782,11 @@
     "github.com/cpuguy83/strongerrors/status",
     "github.com/cpuguy83/strongerrors/status/ocstatus",
     "github.com/docker/docker/api/types/strslice",
-    "github.com/docker/go-connections/nat",
-    "github.com/docker/go-connections/sockets",
-    "github.com/docker/go-connections/tlsconfig",
     "github.com/google/uuid",
     "github.com/gorilla/mux",
     "github.com/gorilla/websocket",
    "github.com/hashicorp/nomad/api",
     "github.com/hashicorp/nomad/testutil",
-    "github.com/hyperhq/hyper-api/client",
-    "github.com/hyperhq/hyper-api/types",
-    "github.com/hyperhq/hyper-api/types/container",
-    "github.com/hyperhq/hyper-api/types/filters",
-    "github.com/hyperhq/hyper-api/types/network",
-    "github.com/hyperhq/hyper-api/types/registry",
-    "github.com/hyperhq/hypercli/cliconfig",
-    "github.com/hyperhq/hypercli/opts",
-    "github.com/hyperhq/hypercli/pkg/jsonmessage",
-    "github.com/hyperhq/hypercli/pkg/term",
-    "github.com/hyperhq/hypercli/reference",
-    "github.com/hyperhq/hypercli/registry",
     "github.com/kr/pretty",
     "github.com/lawrencegripper/pod2docker",
     "github.com/mitchellh/go-homedir",
diff --git a/Gopkg.toml b/Gopkg.toml
index b94888cbc..adc7b82ec 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -41,18 +41,10 @@
   name = "github.com/google/uuid"
   version = "0.2.0"
 
-[[constraint]]
-  name = "github.com/hyperhq/hyper-api"
-  revision = "18c77d3f9fe0abebb41b45c12f383ecac46f4ff1"
-
 [[constraint]]
   name = "github.com/gorilla/mux"
   version = "1.6.0"
 
-[[constraint]]
-  name = "github.com/hyperhq/hypercli"
-  revision = "29217d318cab52815518a1126d57ca010de83e4d"
-
 [[constraint]]
   name = "github.com/Azure/azure-sdk-for-go"
   version = "21.1.0"
@@ -81,19 +73,6 @@
   name = "github.com/cpuguy83/strongerrors"
   version = "0.2.1"
 
-# These are required for HyperHQ
-[[override]]
-  name = "github.com/xeipuuv/gojsonschema"
-  revision = "0c8571ac0ce161a5feb57375a9cdf148c98c0f70"
-
-[[override]]
-  name = "github.com/docker/libcompose"
-  version = "0.2.0"
-
-[[override]]
-  name = "github.com/hyperhq/libcompose"
-  revision = "15d3a105140f968f5d4f62d2f44afd22a24a98fb"
-
 [[constraint]]
   branch = "feature/wolfpack"
   name = "github.com/vmware/vic"
diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md
index 107b79288..d91890408 100644
--- a/ISSUE_TEMPLATE.md
+++ b/ISSUE_TEMPLATE.md
@@ -3,7 +3,7 @@
 
 ### Environment summary
 
-Provider (e.g. ACI, AWS Fargate, Hyper)
+Provider (e.g. ACI, AWS Fargate)
 
 Version (e.g. 0.1, 0.2-beta)
 
diff --git a/Makefile b/Makefile
index 10625e316..6524a8e51 100644
--- a/Makefile
+++ b/Makefile
@@ -123,7 +123,7 @@ format: $(GOPATH)/bin/goimports
 .PHONY: skaffold
 skaffold: MODE ?= dev
 skaffold: PROFILE := local
-skaffold: VK_BUILD_TAGS ?= no_alibabacloud_provider no_aws_provider no_azure_provider no_azurebatch_provider no_cri_provider no_huawei_provider no_hyper_provider no_vic_provider no_web_provider
+skaffold: VK_BUILD_TAGS ?= no_alibabacloud_provider no_aws_provider no_azure_provider no_azurebatch_provider no_cri_provider no_huawei_provider no_vic_provider no_web_provider
 skaffold:
 	@if [[ ! "minikube,docker-for-desktop" =~ .*"$(kubectl_context)".* ]]; then \
 		echo current-context is [$(kubectl_context)]. Must be one of [minikube,docker-for-desktop]; false; \
diff --git a/README.md b/README.md
index a388debd8..3b40a47a2 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 Virtual Kubelet is an open source [Kubernetes kubelet](https://kubernetes.io/docs/reference/generated/kubelet/) implementation that masquerades as a kubelet for the purposes of connecting Kubernetes to other APIs.
 
-This allows the nodes to be backed by other services like ACI, AWS Fargate, Hyper.sh, [IoT Edge](https://github.com/Azure/iot-edge-virtual-kubelet-provider) etc. The primary scenario for VK is enabling the extension of the Kubernetes API into serverless container platforms like ACI, Fargate, and Hyper.sh, though we are open to others. However, it should be noted that VK is explicitly not intended to be an alternative to Kubernetes federation.
+This allows the nodes to be backed by other services like ACI, AWS Fargate, [IoT Edge](https://github.com/Azure/iot-edge-virtual-kubelet-provider) etc. The primary scenario for VK is enabling the extension of the Kubernetes API into serverless container platforms like ACI and Fargate, though we are open to others. However, it should be noted that VK is explicitly not intended to be an alternative to Kubernetes federation.
 
 Virtual Kubelet features a pluggable architecture and direct use of Kubernetes primitives, making it much easier to build on.
 
@@ -23,7 +23,6 @@ The best description is "Kubernetes API on top, programmable back."
+ [Azure Container Instances Provider](#azure-container-instances-provider) + [Azure Batch GPU Provider](./providers/azurebatch/README.md) + [AWS Fargate Provider](#aws-fargate-provider) - + [Hyper.sh Provider](#hypersh-provider) + [Service Fabric Mesh Provider](#service-fabric-mesh-provider) + [HashiCorp Nomad](#hashicorp-nomad-provider) + [Adding a New Provider via the Provider Interface](#adding-a-new-provider-via-the-provider-interface) @@ -178,16 +177,6 @@ co-exist with pods on regular worker nodes in the same Kubernetes cluster. Easy instructions and a sample configuration file is available in the [AWS Fargate provider documentation](providers/aws/README.md). -### Hyper.sh Provider - -The Hyper.sh Provider allows Kubernetes clusters to deploy Hyper.sh containers -and manage both typical pods on VMs and Hyper.sh containers in the same -Kubernetes cluster. - -```bash -./bin/virtual-kubelet --provider hyper -``` - ### Service Fabric Mesh Provider The Service Fabric Mesh Provider allows you to deploy pods to Azure [Service Fabric Mesh](https://docs.microsoft.com/en-us/azure/service-fabric-mesh/service-fabric-mesh-overview). diff --git a/providers/hypersh/README.md b/providers/hypersh/README.md deleted file mode 100644 index f148a94b4..000000000 --- a/providers/hypersh/README.md +++ /dev/null @@ -1,124 +0,0 @@ -hyper.sh provider for virtual-kubelet -===================================== - -# Configure for hyper.sh - -## Use environment variable - -- necessary - - HYPER_ACCESS_KEY - - HYPER_SECRET_KEY -- optional - - HYPER_INSTANCE_TYPE: default s4 - - HYPER_DEFAULT_REGION: default us-west-1 - - HYPER_HOST: tcp://${HYPER_DEFAULT_REGION}.hyper.sh:443 - -> You can use You can use either HYPER_HOST or HYPER_DEFAULT_REGION - - -## Use config file - -> default config file for hyper.sh is ~/.hyper/config.json - -``` -//example configuration file for Hyper.sh -{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "xxxxxx", - "email": "xxxxxx" - }, - }, - "clouds": { - "tcp://*.hyper.sh:443": { - "accesskey": "xxxxxx", - "secretkey": "xxxxxx", - "region": "us-west-1" - } - } -} -``` - -# Usage of virtual-kubelet cli - -``` -// example 1 : use environment variable -export HYPER_ACCESS_KEY=xxxxxx -export HYPER_SECRET_KEY=xxxxxx -export HYPER_DEFAULT_REGION=eu-central-1 -export HYPER_INSTANCE_TYPE=s4 -./virtual-kubelet --provider=hyper - - -// example 2 : use default config file(~/.hyper/config.json) -unset HYPER_ACCESS_KEY -unset HYPER_SECRET_KEY -export HYPER_DEFAULT_REGION=eu-central-1 -./virtual-kubelet --provider=hyper - - -// example 3 : use custom config file, eg: ~/.hyper2/config.json -$ ./virtual-kubelet --provider=hyper --provider-config=$HOME/.hyper2 -``` - - -# Quick Start - -## create pod yaml - -``` -$ cat pod-nginx -apiVersion: v1 -kind: Pod -metadata: - name: nginx -spec: - nodeName: virtual-kubelet - containers: - - name: nginx - image: nginx:latest - ports: - - containerPort: 80 -``` - -## create pod - -``` -$ kubectl create -f pod-nginx -``` - -## list container on hyper.sh - -``` -$ hyper ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES PUBLIC IP -a0ae3d4112d5 nginx:latest "nginx -g 'daemon off" 9 seconds ago Up 4 seconds 0.0.0.0:80->80/tcp pod-nginx-nginx -``` - -## server log - -``` -$ export HYPER_DEFAULT_REGION=eu-central-1 -$ ./virtual-kubelet --provider=hyper --provider-config=$HOME/.hyper3 -/home/demo/.kube/config -2017/12/20 17:30:30 config file under "/home/demo/.hyper3" was loaded -2017/12/20 17:30:30 - Host: tcp://eu-central-1.hyper.sh:443 - AccessKey: 
K********** - SecretKey: 4********** - InstanceType: s4 -2017/12/20 17:30:31 Node 'virtual-kubelet' with OS type 'Linux' registered -2017/12/20 17:30:31 receive GetPods -2017/12/20 17:30:32 found 0 pods -2017/12/20 17:30:37 receive GetPods -2017/12/20 17:30:37 found 0 pods -2017/12/20 17:30:38 Error retrieving pod 'nginx' from provider: Error: No such container: pod-nginx-nginx -2017/12/20 17:30:38 receive CreatePod "nginx" -2017/12/20 17:30:38 container "a0ae3d4112d53023b5972906f2f15c0d34360c132b3c273b286473afad613b63" for pod "nginx" was created -2017/12/20 17:30:43 container "a0ae3d4112d53023b5972906f2f15c0d34360c132b3c273b286473afad613b63" for pod "nginx" was started -2017/12/20 17:30:43 Pod 'nginx' created. -2017/12/20 17:30:43 receive GetPods -2017/12/20 17:30:43 found 1 pods -2017/12/20 17:30:47 receive GetPods -2017/12/20 17:30:47 found 1 pods -``` diff --git a/providers/hypersh/hypersh.go b/providers/hypersh/hypersh.go deleted file mode 100755 index ebf6140bf..000000000 --- a/providers/hypersh/hypersh.go +++ /dev/null @@ -1,457 +0,0 @@ -package hypersh - -import ( - "context" - "fmt" - "io" - "log" - "net/http" - "os" - "runtime" - "time" - - "github.com/cpuguy83/strongerrors" - "github.com/virtual-kubelet/virtual-kubelet/manager" - "github.com/virtual-kubelet/virtual-kubelet/providers" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - hyper "github.com/hyperhq/hyper-api/client" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/network" - "github.com/hyperhq/hypercli/cliconfig" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - apitypes "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/remotecommand" -) - -var host = "tcp://*.hyper.sh:443" - -const ( - verStr = "v1.23" - containerLabel = "hyper-virtual-kubelet" - nodeLabel = containerLabel + "-node" - instanceTypeLabel = "sh_hyper_instancetype" -) - -// HyperProvider implements the virtual-kubelet provider interface and communicates with hyper.sh APIs. 
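Taken together, the methods deleted below make up the provider surface that virtual-kubelet expects. For orientation while reading them, this is that surface reconstructed from the deleted file's method set; a sketch, not the canonical `providers.Provider` definition:

```go
package providers

import (
	"context"
	"io"
	"time"

	"k8s.io/api/core/v1"
	apitypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/remotecommand"
)

// Provider is reconstructed from the deleted HyperProvider's method set.
type Provider interface {
	CreatePod(ctx context.Context, pod *v1.Pod) error
	UpdatePod(ctx context.Context, pod *v1.Pod) error
	DeletePod(ctx context.Context, pod *v1.Pod) error
	GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error)
	GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error)
	GetPods(ctx context.Context) ([]*v1.Pod, error)
	GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error)
	GetPodFullName(namespace string, pod string) string
	ExecInContainer(name string, uid apitypes.UID, container string, cmd []string,
		in io.Reader, out, errw io.WriteCloser, tty bool,
		resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
	Capacity(ctx context.Context) v1.ResourceList
	NodeConditions(ctx context.Context) []v1.NodeCondition
	NodeAddresses(ctx context.Context) []v1.NodeAddress
	NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints
	OperatingSystem() string
}
```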
-type HyperProvider struct { - hyperClient *hyper.Client - configFile *cliconfig.ConfigFile - resourceManager *manager.ResourceManager - nodeName string - operatingSystem string - region string - host string - accessKey string - secretKey string - cpu string - memory string - instanceType string - pods string -} - -// NewHyperProvider creates a new HyperProvider -func NewHyperProvider(config string, rm *manager.ResourceManager, nodeName, operatingSystem string) (*HyperProvider, error) { - var ( - p HyperProvider - err error - host string - dft bool - tlsOptions = &tlsconfig.Options{InsecureSkipVerify: false} - ) - - p.resourceManager = rm - - // Get config from environment variable - if h := os.Getenv("HYPER_HOST"); h != "" { - p.host = h - } - if ak := os.Getenv("HYPER_ACCESS_KEY"); ak != "" { - p.accessKey = ak - } - if sk := os.Getenv("HYPER_SECRET_KEY"); sk != "" { - p.secretKey = sk - } - if p.host == "" { - // ignore HYPER_DEFAULT_REGION when HYPER_HOST was specified - if r := os.Getenv("HYPER_DEFAULT_REGION"); r != "" { - p.region = r - } - } - if it := os.Getenv("HYPER_INSTANCE_TYPE"); it != "" { - p.instanceType = it - } else { - p.instanceType = "s4" - } - - if p.accessKey != "" || p.secretKey != "" { - //use environment variable - if p.accessKey == "" || p.secretKey == "" { - return nil, fmt.Errorf("WARNING: Need to specify HYPER_ACCESS_KEY and HYPER_SECRET_KEY at the same time.") - } - log.Printf("Use AccessKey and SecretKey from HYPER_ACCESS_KEY and HYPER_SECRET_KEY") - if p.region == "" { - p.region = cliconfig.DefaultHyperRegion - } - if p.host == "" { - host, _, err = p.getServerHost(p.region, tlsOptions) - if err != nil { - return nil, err - } - p.host = host - } - } else { - // use config file, default path is ~/.hyper - if config == "" { - config = cliconfig.ConfigDir() - } - configFile, err := cliconfig.Load(config) - if err != nil { - return nil, fmt.Errorf("WARNING: Error loading config file %q: %v\n", config, err) - } - p.configFile = configFile - log.Printf("config file under %q was loaded\n", config) - - if p.host == "" { - host, dft, err = p.getServerHost(p.region, tlsOptions) - if err != nil { - return nil, err - } - p.host = host - } - // Get Region, AccessKey and SecretKey from config file - cc, ok := configFile.CloudConfig[p.host] - if !ok { - cc, ok = configFile.CloudConfig[cliconfig.DefaultHyperFormat] - } - if ok { - p.accessKey = cc.AccessKey - p.secretKey = cc.SecretKey - - if p.region == "" && dft { - if p.region = cc.Region; p.region == "" { - p.region = p.getDefaultRegion() - } - } - if !dft { - if p.region = cc.Region; p.region == "" { - p.region = cliconfig.DefaultHyperRegion - } - } - } else { - return nil, fmt.Errorf("WARNING: can not find entrypoint %q in config file", cliconfig.DefaultHyperFormat) - } - if p.accessKey == "" || p.secretKey == "" { - return nil, fmt.Errorf("WARNING: AccessKey or SecretKey is empty in config %q", config) - } - } - - log.Printf("\n Host: %s\n AccessKey: %s**********\n SecretKey: %s**********\n InstanceType: %s\n", p.host, p.accessKey[0:1], p.secretKey[0:1], p.instanceType) - httpClient, err := newHTTPClient(p.host, tlsOptions) - - customHeaders := map[string]string{} - ver := "0.1" - customHeaders["User-Agent"] = fmt.Sprintf("Virtual-Kubelet-Client/%s (%s)", ver, runtime.GOOS) - - p.operatingSystem = operatingSystem - p.nodeName = nodeName - - p.hyperClient, err = hyper.NewClient(p.host, verStr, httpClient, customHeaders, p.accessKey, p.secretKey, p.region) - if err != nil { - return nil, err - } - //test connect to 
hyper.sh - _, err = p.hyperClient.Info(context.Background()) - if err != nil { - return nil, err - } - return &p, nil -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. - return nil, nil - } - - config, err := tlsconfig.Client(*tlsOptions) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - } - proto, addr, _, err := hyper.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - }, nil -} - -// CreatePod accepts a Pod definition and creates -// a hyper.sh deployment -func (p *HyperProvider) CreatePod(ctx context.Context, pod *v1.Pod) error { - log.Printf("receive CreatePod %q\n", pod.Name) - - //Ignore daemonSet Pod - if pod != nil && pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 && pod.OwnerReferences[0].Kind == "DaemonSet" { - log.Printf("Skip to create DaemonSet pod %q\n", pod.Name) - return nil - } - - // Get containers - containers, hostConfigs, err := p.getContainers(pod) - if err != nil { - return err - } - - // TODO: get volumes - - // Iterate over the containers to create and start them. - for k, ctr := range containers { - //one container in a Pod in hyper.sh currently - containerName := fmt.Sprintf("pod-%s-%s", pod.Name, pod.Spec.Containers[k].Name) - - if err = p.ensureImage(ctr.Image); err != nil { - return err - } - - // Add labels to the pod containers. - ctr.Labels = map[string]string{ - containerLabel: pod.Name, - nodeLabel: p.nodeName, - instanceTypeLabel: p.instanceType, - } - hostConfigs[k].NetworkMode = "bridge" - - // Create the container. - resp, err := p.hyperClient.ContainerCreate(context.Background(), &ctr, &hostConfigs[k], &network.NetworkingConfig{}, containerName) - if err != nil { - return err - } - log.Printf("container %q for pod %q was created\n", resp.ID, pod.Name) - - // Iterate throught the warnings. - for _, warning := range resp.Warnings { - log.Printf("warning while creating container %q for pod %q: %s", containerName, pod.Name, warning) - } - - // Start the container. - if err := p.hyperClient.ContainerStart(context.Background(), resp.ID, ""); err != nil { - return err - } - log.Printf("container %q for pod %q was started\n", resp.ID, pod.Name) - } - return nil -} - -// UpdatePod is a noop, hyper.sh currently does not support live updates of a pod. -func (p *HyperProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error { - return nil -} - -// DeletePod deletes the specified pod out of hyper.sh. 
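CreatePod above maps each pod container to exactly one hyper.sh container named `pod-<pod>-<container>` and stamps it with the `hyper-virtual-kubelet` labels. Note that DeletePod below rebuilds the name as `pod-<pod>-<pod>`, which only matches single-container pods whose container shares the pod's name (the `pod-nginx-nginx` case from the deleted README). A tiny standalone sketch of the naming scheme:

```go
package main

import "fmt"

// containerNameFor mirrors the scheme the deleted CreatePod uses:
// one hyper.sh container per pod container, named pod-<pod>-<container>.
func containerNameFor(podName, containerName string) string {
	return fmt.Sprintf("pod-%s-%s", podName, containerName)
}

func main() {
	// The deleted README's quick start creates pod "nginx" with a container
	// also named "nginx", which is why `hyper ps` lists pod-nginx-nginx.
	fmt.Println(containerNameFor("nginx", "nginx"))
}
```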
-func (p *HyperProvider) DeletePod(ctx context.Context, pod *v1.Pod) (err error) { - log.Printf("receive DeletePod %q\n", pod.Name) - var ( - containerName = fmt.Sprintf("pod-%s-%s", pod.Name, pod.Name) - container types.ContainerJSON - ) - // Inspect hyper container - container, err = p.hyperClient.ContainerInspect(context.Background(), containerName) - if err != nil { - if hyper.IsErrContainerNotFound(err) { - return strongerrors.NotFound(err) - } - return err - } - // Check container label - if v, ok := container.Config.Labels[containerLabel]; ok { - // Check value of label - if v != pod.Name { - return fmt.Errorf("the label %q of hyper container %q should be %q, but it's %q currently", containerLabel, container.Name, pod.Name, v) - } - rmOptions := types.ContainerRemoveOptions{ - RemoveVolumes: true, - Force: true, - } - // Delete hyper container - resp, err := p.hyperClient.ContainerRemove(context.Background(), container.ID, rmOptions) - if err != nil { - return err - } - // Iterate throught the warnings. - for _, warning := range resp { - log.Printf("warning while deleting container %q for pod %q: %s", container.ID, pod.Name, warning) - } - log.Printf("container %q for pod %q was deleted\n", container.ID, pod.Name) - } else { - return fmt.Errorf("hyper container %q has no label %q", container.Name, containerLabel) - } - return nil -} - -// GetPod returns a pod by name that is running inside hyper.sh -// returns nil if a pod by that name is not found. -func (p *HyperProvider) GetPod(ctx context.Context, namespace, name string) (pod *v1.Pod, err error) { - var ( - containerName = fmt.Sprintf("pod-%s-%s", name, name) - container types.ContainerJSON - ) - // Inspect hyper container - container, err = p.hyperClient.ContainerInspect(context.Background(), containerName) - if err != nil { - return nil, err - } - // Convert hyper container into Pod - pod, err = p.containerJSONToPod(&container) - if err != nil { - return nil, err - } else { - return pod, nil - } -} - -// GetContainerLogs retrieves the logs of a container by name from the provider. -func (p *HyperProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error) { - return "", nil -} - -// Get full pod name as defined in the provider context -// TODO: Implementation -func (p *HyperProvider) GetPodFullName(namespace string, pod string) string { - return "" -} - -// ExecInContainer executes a command in a container in the pod, copying data -// between in/out/err and the container's stdin/stdout/stderr. -// TODO: Implementation -func (p *HyperProvider) ExecInContainer(name string, uid apitypes.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - log.Printf("receive ExecInContainer %q\n", container) - return nil -} - -// GetPodStatus returns the status of a pod by name that is running inside hyper.sh -// returns nil if a pod by that name is not found. -func (p *HyperProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) { - pod, err := p.GetPod(ctx, namespace, name) - if err != nil { - return nil, err - } - return &pod.Status, nil -} - -// GetPods returns a list of all pods known to be running within hyper.sh. 
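GetPods below recovers this node's pods by listing the hyper.sh containers that carry the node label written at creation time. The filter handed to `filters.FromParam` is a small JSON document; this sketch prints the exact string the deleted code builds, using the label constants from the top of the file and the node name from the deleted README:

```go
package main

import "fmt"

func main() {
	// nodeLabel is containerLabel + "-node" in the deleted file.
	nodeLabel := "hyper-virtual-kubelet-node"
	nodeName := "virtual-kubelet"

	filter := fmt.Sprintf("{\"label\":{\"%s=%s\":true}}", nodeLabel, nodeName)
	fmt.Println(filter)
	// {"label":{"hyper-virtual-kubelet-node=virtual-kubelet":true}}
}
```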
-func (p *HyperProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) { - log.Printf("receive GetPods\n") - filter, err := filters.FromParam(fmt.Sprintf("{\"label\":{\"%s=%s\":true}}", nodeLabel, p.nodeName)) - if err != nil { - return nil, err - } - // Filter by label. - containers, err := p.hyperClient.ContainerList(context.Background(), types.ContainerListOptions{ - Filter: filter, - All: true, - }) - if err != nil { - return nil, err - } - log.Printf("found %d pods\n", len(containers)) - - var pods = []*v1.Pod{} - for _, container := range containers { - pod, err := p.containerToPod(&container) - if err != nil { - log.Printf("WARNING: convert container %q to pod error: %v\n", container.ID, err) - continue - } - pods = append(pods, pod) - } - return pods, nil -} - -// Capacity returns a resource list containing the capacity limits set for hyper.sh. -func (p *HyperProvider) Capacity(ctx context.Context) v1.ResourceList { - // TODO: These should be configurable - return v1.ResourceList{ - "cpu": resource.MustParse("20"), - "memory": resource.MustParse("100Gi"), - "pods": resource.MustParse("20"), - } -} - -// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status -// within Kubernetes. -func (p *HyperProvider) NodeConditions(ctx context.Context) []v1.NodeCondition { - // TODO: Make these dynamic and augment with custom hyper.sh specific conditions of interest - return []v1.NodeCondition{ - { - Type: "Ready", - Status: v1.ConditionTrue, - LastHeartbeatTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: "KubeletReady", - Message: "kubelet is ready.", - }, - { - Type: "OutOfDisk", - Status: v1.ConditionFalse, - LastHeartbeatTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: "KubeletHasSufficientDisk", - Message: "kubelet has sufficient disk space available", - }, - { - Type: "MemoryPressure", - Status: v1.ConditionFalse, - LastHeartbeatTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: "KubeletHasSufficientMemory", - Message: "kubelet has sufficient memory available", - }, - { - Type: "DiskPressure", - Status: v1.ConditionFalse, - LastHeartbeatTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: "KubeletHasNoDiskPressure", - Message: "kubelet has no disk pressure", - }, - { - Type: "NetworkUnavailable", - Status: v1.ConditionFalse, - LastHeartbeatTime: metav1.Now(), - LastTransitionTime: metav1.Now(), - Reason: "RouteCreated", - Message: "RouteController created a route", - }, - } - -} - -// NodeAddresses returns a list of addresses for the node status -// within Kubernetes. -func (p *HyperProvider) NodeAddresses(ctx context.Context) []v1.NodeAddress { - return nil -} - -// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status -// within Kubernetes. -func (p *HyperProvider) NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints { - return &v1.NodeDaemonEndpoints{} -} - -// OperatingSystem returns the operating system for this provider. -// This is a noop to default to Linux for now. 
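Capacity above advertises fixed resources for the virtual node, with a TODO noting they should become configurable. A standalone sketch of how those `resource.MustParse` quantities round-trip, using the same Kubernetes API machinery packages the deleted file imports:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// The fixed capacity the deleted provider reports for its node.
	capacity := v1.ResourceList{
		"cpu":    resource.MustParse("20"),
		"memory": resource.MustParse("100Gi"),
		"pods":   resource.MustParse("20"),
	}

	mem := capacity["memory"] // copy out so the pointer-receiver String() is callable
	fmt.Println(mem.String()) // 100Gi
}
```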
-func (p *HyperProvider) OperatingSystem() string { - return providers.OperatingSystemLinux -} diff --git a/providers/hypersh/util.go b/providers/hypersh/util.go deleted file mode 100644 index 0405c7216..000000000 --- a/providers/hypersh/util.go +++ /dev/null @@ -1,419 +0,0 @@ -package hypersh - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "log" - "net/url" - "os" - "strings" - "time" - - "github.com/docker/go-connections/nat" - "github.com/docker/go-connections/tlsconfig" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/container" - registrytypes "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/hypercli/cliconfig" - "github.com/hyperhq/hypercli/opts" - "github.com/hyperhq/hypercli/pkg/jsonmessage" - "github.com/hyperhq/hypercli/pkg/term" - "github.com/hyperhq/hypercli/reference" - "github.com/hyperhq/hypercli/registry" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (p *HyperProvider) getContainers(pod *v1.Pod) ([]container.Config, []container.HostConfig, error) { - containers := make([]container.Config, len(pod.Spec.Containers)) - hostConfigs := make([]container.HostConfig, len(pod.Spec.Containers)) - for x, ctr := range pod.Spec.Containers { - // Do container.Config - var c container.Config - c.Image = ctr.Image - c.Cmd = ctr.Command - ports := map[nat.Port]struct{}{} - portBindings := nat.PortMap{} - for _, p := range ctr.Ports { - //TODO: p.HostPort is 0 by default, but it's invalid in hyper.sh - if p.HostPort == 0 { - p.HostPort = p.ContainerPort - } - port, err := nat.NewPort(strings.ToLower(string(p.Protocol)), fmt.Sprintf("%d", p.HostPort)) - if err != nil { - return nil, nil, fmt.Errorf("creating new port in container conversion failed: %v", err) - } - ports[port] = struct{}{} - - portBindings[port] = []nat.PortBinding{ - { - HostIP: "0.0.0.0", - HostPort: fmt.Sprintf("%d", p.HostPort), - }, - } - } - c.ExposedPorts = ports - - // TODO: do volumes - - envs := make([]string, len(ctr.Env)) - for z, e := range ctr.Env { - envs[z] = fmt.Sprintf("%s=%s", e.Name, e.Value) - } - c.Env = envs - - // Do container.HostConfig - var hc container.HostConfig - cpuLimit := ctr.Resources.Limits.Cpu().Value() - memoryLimit := ctr.Resources.Limits.Memory().Value() - - hc.Resources = container.Resources{ - CPUShares: cpuLimit, - Memory: memoryLimit, - } - - hc.PortBindings = portBindings - - containers[x] = c - hostConfigs[x] = hc - } - return containers, hostConfigs, nil -} - -func (p *HyperProvider) containerJSONToPod(container *types.ContainerJSON) (*v1.Pod, error) { - podName, found := container.Config.Labels[containerLabel] - if !found { - return nil, fmt.Errorf("can not found podName: key %q not found in container label", containerLabel) - } - - nodeName, found := container.Config.Labels[nodeLabel] - if !found { - return nil, fmt.Errorf("can not found nodeName: key %q not found in container label", containerLabel) - } - - created, err := time.Parse(time.RFC3339, container.Created) - if err != nil { - return nil, fmt.Errorf("parse Created time failed:%v", container.Created) - } - startedAt, err := time.Parse(time.RFC3339, container.State.StartedAt) - if err != nil { - return nil, fmt.Errorf("parse StartedAt time failed:%v", container.State.StartedAt) - } - finishedAt, err := time.Parse(time.RFC3339, container.State.FinishedAt) - if err != nil { - return nil, fmt.Errorf("parse FinishedAt time failed:%v", container.State.FinishedAt) - } - - var ( - podCondition v1.PodCondition - 
containerState v1.ContainerState - ) - switch p.hyperStateToPodPhase(container.State.Status) { - case v1.PodPending: - podCondition = v1.PodCondition{ - Type: v1.PodInitialized, - Status: v1.ConditionFalse, - } - containerState = v1.ContainerState{ - Waiting: &v1.ContainerStateWaiting{}, - } - case v1.PodRunning: // running - podCondition = v1.PodCondition{ - Type: v1.PodReady, - Status: v1.ConditionTrue, - } - containerState = v1.ContainerState{ - Running: &v1.ContainerStateRunning{ - StartedAt: metav1.NewTime(startedAt), - }, - } - case v1.PodSucceeded: // normal exit - podCondition = v1.PodCondition{ - Type: v1.PodReasonUnschedulable, - Status: v1.ConditionFalse, - } - containerState = v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - ExitCode: int32(container.State.ExitCode), - FinishedAt: metav1.NewTime(finishedAt), - }, - } - case v1.PodFailed: // exit with error - podCondition = v1.PodCondition{ - Type: v1.PodReasonUnschedulable, - Status: v1.ConditionFalse, - } - containerState = v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ - ExitCode: int32(container.State.ExitCode), - FinishedAt: metav1.NewTime(finishedAt), - Reason: container.State.Error, - }, - } - default: //unkown - podCondition = v1.PodCondition{ - Type: v1.PodReasonUnschedulable, - Status: v1.ConditionUnknown, - } - containerState = v1.ContainerState{} - } - - pod := v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - CreationTimestamp: metav1.NewTime(created), - }, - Spec: v1.PodSpec{ - NodeName: nodeName, - Volumes: []v1.Volume{}, - Containers: []v1.Container{ - { - Name: podName, - Image: container.Config.Image, - Command: container.Config.Cmd, - }, - }, - }, - Status: v1.PodStatus{ - Phase: p.hyperStateToPodPhase(container.State.Status), - Conditions: []v1.PodCondition{podCondition}, - Message: "", - Reason: "", - HostIP: "", - PodIP: container.NetworkSettings.IPAddress, - ContainerStatuses: []v1.ContainerStatus{ - { - Name: podName, - RestartCount: int32(container.RestartCount), - Image: container.Config.Image, - ImageID: container.Image, - ContainerID: container.ID, - Ready: container.State.Running, - State: containerState, - }, - }, - }, - } - return &pod, nil -} - -func (p *HyperProvider) containerToPod(container *types.Container) (*v1.Pod, error) { - // TODO: convert containers into pods - podName, found := container.Labels[containerLabel] - if !found { - return nil, fmt.Errorf("can not found podName: key %q not found in container label", containerLabel) - } - - nodeName, found := container.Labels[nodeLabel] - if !found { - return nil, fmt.Errorf("can not found nodeName: key %q not found in container label", containerLabel) - } - - var ( - podCondition v1.PodCondition - isReady bool = true - ) - if strings.ToLower(string(container.State)) == strings.ToLower(string(v1.PodRunning)) { - podCondition = v1.PodCondition{ - Type: v1.PodReady, - Status: v1.ConditionTrue, - } - } else { - podCondition = v1.PodCondition{ - Type: v1.PodReasonUnschedulable, - Status: v1.ConditionFalse, - } - isReady = false - } - - pod := v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: "default", - ClusterName: "", - UID: "", - CreationTimestamp: metav1.NewTime(time.Unix(container.Created, 0)), - }, - Spec: v1.PodSpec{ - NodeName: nodeName, - Volumes: []v1.Volume{}, - Containers: []v1.Container{ - { - Name: podName, - 
Image: container.Image, - Command: strings.Split(container.Command, " "), - Resources: v1.ResourceRequirements{}, - }, - }, - }, - Status: v1.PodStatus{ - //Phase: "", - Conditions: []v1.PodCondition{podCondition}, - Message: "", - Reason: "", - HostIP: "", - PodIP: "", - ContainerStatuses: []v1.ContainerStatus{ - { - Name: container.Names[0], - Image: container.Image, - ImageID: container.ImageID, - ContainerID: container.ID, - Ready: isReady, - State: v1.ContainerState{}, - }, - }, - }, - } - return &pod, nil -} - -func (p *HyperProvider) hyperStateToPodPhase(state string) v1.PodPhase { - switch strings.ToLower(state) { - case "created": - return v1.PodPending - case "restarting": - return v1.PodPending - case "running": - return v1.PodRunning - case "exited": - return v1.PodSucceeded - case "paused": - return v1.PodSucceeded - case "dead": - return v1.PodFailed - } - return v1.PodUnknown -} - -func (p *HyperProvider) getServerHost(region string, tlsOptions *tlsconfig.Options) (host string, dft bool, err error) { - dft = false - host = region - if host == "" { - host = os.Getenv("HYPER_DEFAULT_REGION") - region = p.getDefaultRegion() - } - if _, err := url.ParseRequestURI(host); err != nil { - host = "tcp://" + region + "." + cliconfig.DefaultHyperEndpoint - dft = true - } - host, err = opts.ParseHost(tlsOptions != nil, host) - return -} - -func (p *HyperProvider) getDefaultRegion() string { - cc, ok := p.configFile.CloudConfig[cliconfig.DefaultHyperFormat] - if ok && cc.Region != "" { - return cc.Region - } - return cliconfig.DefaultHyperRegion -} - -func (p *HyperProvider) ensureImage(image string) error { - distributionRef, err := reference.ParseNamed(image) - if err != nil { - return err - } - - if reference.IsNameOnly(distributionRef) { - distributionRef = reference.WithDefaultTag(distributionRef) - log.Printf("Using default tag: %s", reference.DefaultTag) - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - var authConfig types.AuthConfig - if p.configFile != nil { - authConfig = p.resolveAuthConfig(p.configFile.AuthConfigs, repoInfo.Index) - } - encodedAuth, err := p.encodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - All: false, - } - responseBody, err := p.hyperClient.ImagePull(context.Background(), distributionRef.String(), options) - if err != nil { - return err - } - defer responseBody.Close() - var ( - outFd uintptr - isTerminalOut bool - ) - _, stdout, _ := term.StdStreams() - if stdout != nil { - outFd, isTerminalOut = term.GetFdInfo(stdout) - } - jsonmessage.DisplayJSONMessagesStream(responseBody, stdout, outFd, isTerminalOut, nil) - return nil -} - -func (p *HyperProvider) resolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = p.electAuthServer() - } - - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - convertToHostname := func(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to 
the new format and testing - for registry, ac := range authConfigs { - if configKey == convertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -func (p *HyperProvider) electAuthServer() string { - serverAddress := registry.IndexServer - if info, err := p.hyperClient.Info(context.Background()); err != nil { - log.Printf("Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s", err, serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// encodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func (p *HyperProvider) encodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} diff --git a/providers/register/provider_hyper.go b/providers/register/provider_hyper.go deleted file mode 100644 index ac43af5d5..000000000 --- a/providers/register/provider_hyper.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !no_hyper_provider - -package register - -import ( - "github.com/virtual-kubelet/virtual-kubelet/providers" - "github.com/virtual-kubelet/virtual-kubelet/providers/hypersh" -) - -func init() { - register("hyper", initHyper) -} - -func initHyper(cfg InitConfig) (providers.Provider, error) { - return hypersh.NewHyperProvider(cfg.ConfigPath, cfg.ResourceManager, cfg.NodeName, cfg.OperatingSystem) -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE deleted file mode 100644 index f49a4e16e..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/NOTICE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/NOTICE deleted file mode 100644 index 5f6236293..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/NOTICE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- -This project may include a number of subcomponents with separate -copyright notices and license terms. Your use of these subcomponents -is subject to the terms and conditions of each subcomponent's license, -as noted in the LICENSE file. diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go deleted file mode 100644 index 87c1043ea..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go +++ /dev/null @@ -1,834 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Create a new parser object. - */ - -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE), - buffer: make([]byte, 0, INPUT_BUFFER_SIZE), - } - - return true -} - -/* - * Destroy a parser object. - */ -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -/* - * String read handler. - */ - -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - - n := copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -/* - * File read handler. - */ - -func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) { - return parser.input_reader.Read(buffer) -} - -/* - * Set a string input. - */ - -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_string_read_handler - - parser.input = input - parser.input_pos = 0 -} - -/* - * Set a reader input - */ -func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = yaml_file_read_handler - parser.input_reader = reader -} - -/* - * Set a generic input. - */ - -func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) { - if parser.read_handler != nil { - panic("input already set") - } - - parser.read_handler = handler -} - -/* - * Set the source encoding. - */ - -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - parser.encoding = encoding -} - -/* - * Create a new emitter object. - */ - -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, OUTPUT_BUFFER_SIZE), - raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE), - states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE), - events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE), - } -} - -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -/* - * String write handler. 
- */ - -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -/* - * File write handler. - */ - -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -/* - * Set a string output. - */ - -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = buffer -} - -/* - * Set a file output. - */ - -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -/* - * Set a generic output handler. - */ - -func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) { - if emitter.write_handler != nil { - panic("output already set") - } - - emitter.write_handler = handler -} - -/* - * Set the output encoding. - */ - -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("encoding already set") - } - - emitter.encoding = encoding -} - -/* - * Set the canonical output style. - */ - -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -/* - * Set the indentation increment. - */ - -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -/* - * Set the preferred line width. - */ - -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -/* - * Set if unescaped non-ASCII characters are allowed. - */ - -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -/* - * Set the preferred line break character. - */ - -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -/* - * Destroy a token object. - */ - -// yaml_DECLARE(void) -// yaml_token_delete(yaml_token_t *token) -// { -// assert(token); /* Non-NULL token object expected. */ -// -// switch (token.type) -// { -// case yaml_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case yaml_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case yaml_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case yaml_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case yaml_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -// } - -/* - * Check if a string is a valid UTF-8 sequence. - * - * Check 'reader.c' for more details on UTF-8 encoding. - */ - -// static int -// yaml_check_utf8(yaml_char_t *start, size_t length) -// { -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 
2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -// } - -/* - * Create STREAM-START. - */ - -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -/* - * Create STREAM-END. - */ - -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_STREAM_END_EVENT, - } -} - -/* - * Create DOCUMENT-START. - */ - -func yaml_document_start_event_initialize(event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -/* - * Create DOCUMENT-END. - */ - -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -/* - * Create ALIAS. - */ - -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) { - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - anchor: anchor, - } -} - -/* - * Create SCALAR. - */ - -func yaml_scalar_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, - value []byte, - plain_implicit bool, quoted_implicit bool, - style yaml_scalar_style_t) { - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-START. - */ - -func yaml_sequence_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create SEQUENCE-END. - */ - -func yaml_sequence_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - } -} - -/* - * Create MAPPING-START. - */ - -func yaml_mapping_start_event_initialize(event *yaml_event_t, - anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -/* - * Create MAPPING-END. - */ - -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - } -} - -/* - * Destroy an event object. - */ - -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -// /* -// * Create a document object. 
-// */ -// -// func yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives []yaml_tag_directive_t, -// start_implicit, end_implicit bool) bool { -// -// -// { -// struct { -// YAML_error_type_t error; -// } context; -// struct { -// yaml_node_t *start; -// yaml_node_t *end; -// yaml_node_t *top; -// } nodes = { NULL, NULL, NULL }; -// yaml_version_directive_t *version_directive_copy = NULL; -// struct { -// yaml_tag_directive_t *start; -// yaml_tag_directive_t *end; -// yaml_tag_directive_t *top; -// } tag_directives_copy = { NULL, NULL, NULL }; -// yaml_tag_directive_t value = { NULL, NULL }; -// YAML_mark_t mark = { 0, 0, 0 }; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)); -// /* Valid tag directives are expected. */ -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error; -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)); -// if (!version_directive_copy) goto error; -// version_directive_copy.major = version_directive.major; -// version_directive_copy.minor = version_directive.minor; -// } -// -// if (tag_directives_start != tag_directives_end) { -// yaml_tag_directive_t *tag_directive; -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error; -// for (tag_directive = tag_directives_start; -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle); -// assert(tag_directive.prefix); -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error; -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error; -// value.handle = yaml_strdup(tag_directive.handle); -// value.prefix = yaml_strdup(tag_directive.prefix); -// if (!value.handle || !value.prefix) goto error; -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error; -// value.handle = NULL; -// value.prefix = NULL; -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark); -// -// return 1; -// -// error: -// STACK_DEL(&context, nodes); -// yaml_free(version_directive_copy); -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// yaml_tag_directive_t value = POP(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// } -// STACK_DEL(&context, tag_directives_copy); -// yaml_free(value.handle); -// yaml_free(value.prefix); -// -// return 0; -// } -// -// /* -// * Destroy a document object. -// */ -// -// yaml_DECLARE(void) -// yaml_document_delete(document *yaml_document_t) -// { -// struct { -// YAML_error_type_t error; -// } context; -// yaml_tag_directive_t *tag_directive; -// -// context.error = yaml_NO_ERROR; /* Eliminate a compliler warning. */ -// -// assert(document); /* Non-NULL document object is expected. 
*/ -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// yaml_node_t node = POP(&context, document.nodes); -// yaml_free(node.tag); -// switch (node.type) { -// case yaml_SCALAR_NODE: -// yaml_free(node.data.scalar.value); -// break; -// case yaml_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items); -// break; -// case yaml_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs); -// break; -// default: -// assert(0); /* Should not happen. */ -// } -// } -// STACK_DEL(&context, document.nodes); -// -// yaml_free(document.version_directive); -// for (tag_directive = document.tag_directives.start; -// tag_directive != document.tag_directives.end; -// tag_directive++) { -// yaml_free(tag_directive.handle); -// yaml_free(tag_directive.prefix); -// } -// yaml_free(document.tag_directives.start); -// -// memset(document, 0, sizeof(yaml_document_t)); -// } -// -// /** -// * Get a document node. -// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_node(document *yaml_document_t, int index) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1; -// } -// return NULL; -// } -// -// /** -// * Get the root object. -// */ -// -// yaml_DECLARE(yaml_node_t *) -// yaml_document_get_root_node(document *yaml_document_t) -// { -// assert(document); /* Non-NULL document object is expected. */ -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start; -// } -// return NULL; -// } -// -// /* -// * Add a scalar node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_scalar(document *yaml_document_t, -// yaml_char_t *tag, yaml_char_t *value, int length, -// yaml_scalar_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// yaml_char_t *value_copy = NULL; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// assert(value); /* Non-NULL value is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (length < 0) { -// length = strlen((char *)value); -// } -// -// if (!yaml_check_utf8(value, length)) goto error; -// value_copy = yaml_malloc(length+1); -// if (!value_copy) goto error; -// memcpy(value_copy, value, length); -// value_copy[length] = '\0'; -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// yaml_free(tag_copy); -// yaml_free(value_copy); -// -// return 0; -// } -// -// /* -// * Add a sequence node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_sequence(document *yaml_document_t, -// yaml_char_t *tag, yaml_sequence_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_item_t *start; -// yaml_node_item_t *end; -// yaml_node_item_t *top; -// } items = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. 
*/ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error; -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, items); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Add a mapping node to a document. -// */ -// -// yaml_DECLARE(int) -// yaml_document_add_mapping(document *yaml_document_t, -// yaml_char_t *tag, yaml_mapping_style_t style) -// { -// struct { -// YAML_error_type_t error; -// } context; -// YAML_mark_t mark = { 0, 0, 0 }; -// yaml_char_t *tag_copy = NULL; -// struct { -// yaml_node_pair_t *start; -// yaml_node_pair_t *end; -// yaml_node_pair_t *top; -// } pairs = { NULL, NULL, NULL }; -// yaml_node_t node; -// -// assert(document); /* Non-NULL document object is expected. */ -// -// if (!tag) { -// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG; -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error; -// tag_copy = yaml_strdup(tag); -// if (!tag_copy) goto error; -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error; -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark); -// if (!PUSH(&context, document.nodes, node)) goto error; -// -// return document.nodes.top - document.nodes.start; -// -// error: -// STACK_DEL(&context, pairs); -// yaml_free(tag_copy); -// -// return 0; -// } -// -// /* -// * Append an item to a sequence node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_sequence_item(document *yaml_document_t, -// int sequence, int item) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// assert(document); /* Non-NULL document is required. */ -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top); -// /* Valid sequence id is required. */ -// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE); -// /* A sequence node is required. */ -// assert(item > 0 && document.nodes.start + item <= document.nodes.top); -// /* Valid item id is required. */ -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0; -// -// return 1; -// } -// -// /* -// * Append a pair of a key and a value to a mapping node. -// */ -// -// yaml_DECLARE(int) -// yaml_document_append_mapping_pair(document *yaml_document_t, -// int mapping, int key, int value) -// { -// struct { -// YAML_error_type_t error; -// } context; -// -// yaml_node_pair_t pair; -// -// assert(document); /* Non-NULL document is required. */ -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top); -// /* Valid mapping id is required. */ -// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE); -// /* A mapping node is required. */ -// assert(key > 0 && document.nodes.start + key <= document.nodes.top); -// /* Valid key id is required. */ -// assert(value > 0 && document.nodes.start + value <= document.nodes.top); -// /* Valid value id is required. 
*/ -// -// pair.key = key; -// pair.value = value; -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0; -// -// return 1; -// } -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go deleted file mode 100644 index dcc1b89cf..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go +++ /dev/null @@ -1,622 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "runtime" - "strconv" - "strings" -) - -type Unmarshaler interface { - UnmarshalYAML(tag string, value interface{}) error -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -type Decoder struct { - parser yaml_parser_t - event yaml_event_t - replay_events []yaml_event_t - useNumber bool - - anchors map[string][]yaml_event_t - tracking_anchors [][]yaml_event_t -} - -type ParserError struct { - ErrorType YAML_error_type_t - Context string - ContextMark YAML_mark_t - Problem string - ProblemMark YAML_mark_t -} - -func (e *ParserError) Error() string { - return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1) -} - -type UnexpectedEventError struct { - Value string - EventType yaml_event_type_t - At YAML_mark_t -} - -func (e *UnexpectedEventError) Error() string { - return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1) -} - -func recovery(err *error) { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - - var tmpError error - switch r := r.(type) { - case error: - tmpError = r - case string: - tmpError = errors.New(r) - default: - tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String()) - } - - *err = tmpError - } -} - -func Unmarshal(data []byte, v interface{}) error { - d := NewDecoder(bytes.NewBuffer(data)) - return d.Decode(v) -} - -func NewDecoder(r io.Reader) *Decoder { - d := &Decoder{ - anchors: make(map[string][]yaml_event_t), - tracking_anchors: make([][]yaml_event_t, 1), - } - yaml_parser_initialize(&d.parser) - yaml_parser_set_input_reader(&d.parser, r) - return d -} - -func (d *Decoder) Decode(v interface{}) (err error) { - defer recovery(&err) - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark) - } - - if d.event.event_type == yaml_NO_EVENT { - d.nextEvent() - - if d.event.event_type != 
yaml_STREAM_START_EVENT { - return errors.New("Invalid stream") - } - - d.nextEvent() - } - - d.document(rv) - return nil -} - -func (d *Decoder) UseNumber() { d.useNumber = true } - -func (d *Decoder) error(err error) { - panic(err) -} - -func (d *Decoder) nextEvent() { - if d.event.event_type == yaml_STREAM_END_EVENT { - d.error(errors.New("The stream is closed")) - } - - if d.replay_events != nil { - d.event = d.replay_events[0] - if len(d.replay_events) == 1 { - d.replay_events = nil - } else { - d.replay_events = d.replay_events[1:] - } - } else { - if !yaml_parser_parse(&d.parser, &d.event) { - yaml_event_delete(&d.event) - - d.error(&ParserError{ - ErrorType: d.parser.error, - Context: d.parser.context, - ContextMark: d.parser.context_mark, - Problem: d.parser.problem, - ProblemMark: d.parser.problem_mark, - }) - } - } - - last := len(d.tracking_anchors) - // skip aliases when tracking an anchor - if last > 0 && d.event.event_type != yaml_ALIAS_EVENT { - d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event) - } -} - -func (d *Decoder) document(rv reflect.Value) { - if d.event.event_type != yaml_DOCUMENT_START_EVENT { - d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark)) - } - - d.nextEvent() - d.parse(rv) - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark)) - } - - d.nextEvent() -} - -func (d *Decoder) parse(rv reflect.Value) { - if !rv.IsValid() { - // skip ahead since we cannot store - d.valueInterface() - return - } - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - d.sequence(rv) - d.end_anchor(anchor) - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - d.mapping(rv) - d.end_anchor(anchor) - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - d.scalar(rv) - d.end_anchor(anchor) - case yaml_ALIAS_EVENT: - d.alias(rv) - case yaml_DOCUMENT_END_EVENT: - default: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - } -} - -func (d *Decoder) begin_anchor(anchor string) { - if anchor != "" { - events := []yaml_event_t{d.event} - d.tracking_anchors = append(d.tracking_anchors, events) - } -} - -func (d *Decoder) end_anchor(anchor string) { - if anchor != "" { - events := d.tracking_anchors[len(d.tracking_anchors)-1] - d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1] - // remove the anchor, replaying events shouldn't have anchors - events[0].anchor = nil - // we went one too many, remove the extra event - events = events[:len(events)-1] - // if nested, append to all the other anchors - for i, e := range d.tracking_anchors { - d.tracking_anchors[i] = append(e, events...) - } - d.anchors[anchor] = events - } -} - -func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
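For reviewers tracing the removal: the exported surface of this decoder is Unmarshal, NewDecoder/Decode, and UseNumber, with Number carrying numeric scalars much like encoding/json's. A minimal sketch of the call side (the main wrapper and the YAML input are invented, and the Number assertion assumes UseNumber routes numeric scalars through that type, mirroring encoding/json):

    package main

    import (
        "fmt"
        "strings"

        "github.com/cloudfoundry-incubator/candiedyaml"
    )

    func main() {
        d := candiedyaml.NewDecoder(strings.NewReader("port: 8080\n"))
        d.UseNumber() // numeric scalars should resolve via candiedyaml.Number

        var cfg map[interface{}]interface{}
        if err := d.Decode(&cfg); err != nil {
            panic(err)
        }

        // Number keeps the literal text and offers Int64/Float64 conversions.
        if n, ok := cfg["port"].(candiedyaml.Number); ok {
            i, _ := n.Int64()
            fmt.Println(i) // 8080
        }
    }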
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - var temp interface{} - return u, reflect.ValueOf(&temp) - } - } - - v = v.Elem() - } - - return nil, v -} - -func (d *Decoder) sequence(v reflect.Value) { - if d.event.event_type != yaml_SEQUENCE_START_EVENT { - d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark)) - } - - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.sequenceInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark)) - case reflect.Array: - case reflect.Slice: - break - } - - d.nextEvent() - - i := 0 -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.parse(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.parse(reflect.Value{}) - } - i++ - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } -} - -func (d *Decoder) mapping(v reflect.Value) { - u, pv := d.indirect(v, false) - if u != nil { - defer func() { - if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil { - d.error(err) - } - }() - _, pv = d.indirect(pv, false) - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. 
- if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.mappingInterface())) - return - } - - // Check type of target: struct or map[X]Y - switch v.Kind() { - case reflect.Struct: - d.mappingStruct(v) - return - case reflect.Map: - default: - d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark)) - } - - mapt := v.Type() - if v.IsNil() { - v.Set(reflect.MakeMap(mapt)) - } - - d.nextEvent() - - keyt := mapt.Key() - mapElemt := mapt.Elem() - - var mapElem reflect.Value -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := reflect.New(keyt) - d.parse(key.Elem()) - - if !mapElem.IsValid() { - mapElem = reflect.New(mapElemt).Elem() - } else { - mapElem.Set(reflect.Zero(mapElemt)) - } - - d.parse(mapElem) - - v.SetMapIndex(key.Elem(), mapElem) - } - - d.nextEvent() -} - -func (d *Decoder) mappingStruct(v reflect.Value) { - - structt := v.Type() - fields := cachedTypeFields(structt) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT: - break done - case yaml_DOCUMENT_END_EVENT: - return - } - - key := "" - d.parse(reflect.ValueOf(&key)) - - // Figure out field corresponding to key. - var subv reflect.Value - - var f *field - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - - if f != nil { - subv = v - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - d.parse(subv) - } - - d.nextEvent() -} - -func (d *Decoder) scalar(v reflect.Value) { - val := string(d.event.value) - wantptr := null_values[val] - - u, pv := d.indirect(v, wantptr) - - var tag string - if u != nil { - defer func() { - if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil { - d.error(err) - } - }() - - _, pv = d.indirect(pv, wantptr) - } - v = pv - - var err error - tag, err = resolve(d.event, v, d.useNumber) - if err != nil { - d.error(err) - } - - d.nextEvent() -} - -func (d *Decoder) alias(rv reflect.Value) { - val, ok := d.anchors[string(d.event.anchor)] - if !ok { - d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark)) - } - - d.replay_events = val - d.nextEvent() - d.parse(rv) -} - -func (d *Decoder) valueInterface() interface{} { - var v interface{} - - anchor := string(d.event.anchor) - switch d.event.event_type { - case yaml_SEQUENCE_START_EVENT: - d.begin_anchor(anchor) - v = d.sequenceInterface() - case yaml_MAPPING_START_EVENT: - d.begin_anchor(anchor) - v = d.mappingInterface() - case yaml_SCALAR_EVENT: - d.begin_anchor(anchor) - v = d.scalarInterface() - case yaml_ALIAS_EVENT: - rv := reflect.ValueOf(&v) - d.alias(rv) - return v - case yaml_DOCUMENT_END_EVENT: - d.error(&UnexpectedEventError{ - Value: string(d.event.value), - EventType: d.event.event_type, - At: d.event.start_mark, - }) - - } - d.end_anchor(anchor) - - return v -} - -func (d *Decoder) scalarInterface() interface{} { - _, v := resolveInterface(d.event, d.useNumber) - - d.nextEvent() - return v -} - -// sequenceInterface is like sequence but returns []interface{}. 
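The alias path just above is worth a concrete example: begin_anchor/end_anchor record the event stream behind each &anchor, and alias replays those events wherever a *reference occurs, so both keys below should decode to the same value (input invented):

    package main

    import (
        "fmt"

        "github.com/cloudfoundry-incubator/candiedyaml"
    )

    func main() {
        doc := []byte("base: &img nginx:1.15\ncopy: *img\n")

        var v map[interface{}]interface{}
        if err := candiedyaml.Unmarshal(doc, &v); err != nil {
            panic(err)
        }
        fmt.Println(v["base"], v["copy"]) // nginx:1.15 nginx:1.15
    }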
-func (d *Decoder) sequenceInterface() []interface{} { - var v = make([]interface{}, 0) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - v = append(v, d.valueInterface()) - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return v -} - -// mappingInterface is like mapping but returns map[interface{}]interface{}. -func (d *Decoder) mappingInterface() map[interface{}]interface{} { - m := make(map[interface{}]interface{}) - - d.nextEvent() - -done: - for { - switch d.event.event_type { - case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT: - break done - } - - key := d.valueInterface() - - // Read value. - m[key] = d.valueInterface() - } - - if d.event.event_type != yaml_DOCUMENT_END_EVENT { - d.nextEvent() - } - - return m -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go deleted file mode 100644 index bd2014f34..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go +++ /dev/null @@ -1,2072 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -/* - * Flush the buffer if needed. - */ - -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -/* - * Put a character to the output buffer. - */ -func put(emitter *yaml_emitter_t, value byte) bool { - if !flush(emitter) { - return false - } - - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -/* - * Put a line break to the output buffer. - */ - -func put_break(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos++ - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos++ - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 2 - default: - return false - } - emitter.column = 0 - emitter.line++ - return true -} - -/* - * Copy a character from a string into buffer. - */ -func write(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if !flush(emitter) { - return false - } - copy_bytes(emitter.buffer, &emitter.buffer_pos, src, src_pos) - emitter.column++ - return true -} - -/* - * Copy a line break character from a string into buffer. 
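A standalone paraphrase of the buffering discipline being removed here, not the vendored code itself (the type and helper names are invented): a conditional flush keeps free space in the buffer, presumably headroom for a maximal UTF-8 sequence plus a break, so the byte-level writers never bounds-check individually.

    package main

    import "os"

    type emitter struct {
        buf []byte
        pos int
    }

    // drain unconditionally writes out the buffered bytes
    // (the role yaml_emitter_flush plays in the deleted code).
    func (e *emitter) drain() {
        os.Stdout.Write(e.buf[:e.pos])
        e.pos = 0
    }

    // flush drains only once fewer than five free bytes remain,
    // matching the buffer_pos+5 check above.
    func (e *emitter) flush() {
        if e.pos+5 >= len(e.buf) {
            e.drain()
        }
    }

    // put writes a single byte, making room first.
    func (e *emitter) put(b byte) {
        e.flush()
        e.buf[e.pos] = b
        e.pos++
    }

    func main() {
        e := &emitter{buf: make([]byte, 8)}
        for _, b := range []byte("buffered output\n") {
            e.put(b)
        }
        e.drain()
    }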
- */ - -func write_break(emitter *yaml_emitter_t, src []byte, src_pos *int) bool { - if src[*src_pos] == '\n' { - if !put_break(emitter) { - return false - } - *src_pos++ - } else { - if !write(emitter, src, src_pos) { - return false - } - emitter.column = 0 - emitter.line++ - } - - return true -} - -/* - * Set an emitter error and return 0. - */ - -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -/* - * Emit an event. - */ - -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -/* - * Check if we need to accumulate more events before emitting. - * - * We accumulate extra - * - 1 event for DOCUMENT-START - * - 2 events for SEQUENCE-START - * - 3 events for MAPPING-START - */ - -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - - accumulate := 0 - switch emitter.events[emitter.events_head].event_type { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - case yaml_MAPPING_START_EVENT: - accumulate = 3 - default: - return false - } - - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - - level := 0 - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].event_type { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - - if level == 0 { - return false - } - } - return true -} - -/* - * Append a directive to the directives stack. - */ - -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, - value *yaml_tag_directive_t, allow_duplicates bool) bool { - - for i := range emitter.tag_directives { - - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicat %TAG directive") - } - } - - tag_copy := yaml_tag_directive_t{ - handle: value.handle, - prefix: value.prefix, - } - - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - - return true -} - -/* - * Increase the indentation level. - */ - -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow bool, indentless bool) bool { - - emitter.indents = append(emitter.indents, emitter.indent) - - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - } - - return true -} - -/* - * State dispatcher. 
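The state dispatcher that follows drives emission through an explicit current state plus a stack of states to resume: every compound node pushes its continuation before descending, and the matching END event pops it. A toy illustration of the pattern (the state names are invented):

    package main

    import "fmt"

    type state int

    const (
        emitDocEnd state = iota
        emitSeqItem
    )

    func main() {
        var states []state
        cur := emitSeqItem // descending into a sequence's items

        // SEQUENCE-START: push the continuation before emitting items.
        states = append(states, emitDocEnd)

        // ... item states cycle here ...

        // SEQUENCE-END: pop and resume where the sequence was opened.
        cur = states[len(states)-1]
        states = states[:len(states)-1]
        fmt.Println(cur == emitDocEnd) // true
    }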
- */ - -func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, - "expected nothing after STREAM-END") - - } - - panic("invalid state") -} - -/* - * Expect STREAM-START. - */ - -func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected STREAM-START") - } - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - - return true -} - -/* - * Expect DOCUMENT-START or STREAM-END. 
- */ - -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if event.event_type == yaml_DOCUMENT_START_EVENT { - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, - *event.version_directive) { - return false - } - } - - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := range default_tag_directives { - if !yaml_emitter_append_tag_directive(emitter, &default_tag_directives[i], true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if (event.version_directive != nil || len(event.tag_directives) > 0) && - emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := range event.tag_directives { - tag_directive := &event.tag_directives[i] - - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - - if emitter.canonical { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - - return true - } else if event.event_type == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_END_STATE - - return true - } - - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-START or STREAM-END") -} - -/* - * Expect the root node. - */ - -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - return yaml_emitter_emit_node(emitter, event, true, false, false, false) -} - -/* - * Expect DOCUMENT-END. 
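The %YAML/%TAG directive and "---"/"..." marker handling above is the emitting side of syntax the parser accepts too, so an explicit document like the one below should round-trip through the deleted decoder (input invented; 1.1 is the only version level the emitter accepts):

    package main

    import (
        "fmt"

        "github.com/cloudfoundry-incubator/candiedyaml"
    )

    func main() {
        doc := []byte("%YAML 1.1\n---\nname: virtual-kubelet\n...\n")

        var v map[interface{}]interface{}
        if err := candiedyaml.Unmarshal(doc, &v); err != nil {
            panic(err)
        }
        fmt.Println(v["name"]) // virtual-kubelet
    }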
- */ - -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - if event.event_type != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, - "expected DOCUMENT-END") - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -/* - * - * Expect a flow item node. - */ - -func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte("["), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("]"), false, false, false) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a flow key node. 
- */ - -func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - - if !yaml_emitter_write_indicator(emitter, []byte("{"), true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - if emitter.canonical && !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte("}"), false, false, false) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first { - if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) { - return false - } - } - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, false) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a flow value node. - */ - -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, false) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a block item node. - */ - -func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, - (emitter.mapping_context && !emitter.indention)) { - return false - } - } - - if event.event_type == yaml_SEQUENCE_END_EVENT { - - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("-"), true, false, true) { - return false - } - - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - return yaml_emitter_emit_node(emitter, event, false, true, false, false) -} - -/* - * Expect a block key node. 
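The flow and block collection shapes emitted above are equivalent on the way back in; the two documents below should decode to the same structure (inputs invented):

    package main

    import (
        "fmt"
        "reflect"

        "github.com/cloudfoundry-incubator/candiedyaml"
    )

    func main() {
        block := []byte("items:\n- 1\n- 2\nmeta:\n  env: dev\n")
        flow := []byte("items: [1, 2]\nmeta: {env: dev}\n")

        var a, b map[interface{}]interface{}
        if err := candiedyaml.Unmarshal(block, &a); err != nil {
            panic(err)
        }
        if err := candiedyaml.Unmarshal(flow, &b); err != nil {
            panic(err)
        }
        fmt.Println(reflect.DeepEqual(a, b)) // true
    }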
- */ - -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, - event *yaml_event_t, first bool) bool { - - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - - if event.event_type == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !yaml_emitter_write_indent(emitter) { - return false - } - - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } else { - if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) - } -} - -/* - * Expect a block value node. - */ - -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, - event *yaml_event_t, simple bool) bool { - - if simple { - if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -/* - * Expect a node. - */ - -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.event_type { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - - default: - return yaml_emitter_set_emitter_error(emitter, - "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS") - } - - return false -} - -/* - * Expect ALIAS. - */ - -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SCALAR. 
- */ - -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true -} - -/* - * Expect SEQUENCE-START. - */ - -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_SEQUENCE_STYLE) || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - - return true -} - -/* - * Expect MAPPING-START. - */ - -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - - if emitter.flow_level > 0 || emitter.canonical || - event.style == yaml_style_t(yaml_FLOW_MAPPING_STYLE) || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - - return true -} - -/* - * Check if the document content is an empty scalar. - */ - -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false -} - -/* - * Check if the next events represent an empty sequence. - */ - -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_SEQUENCE_END_EVENT) -} - -/* - * Check if the next events represent an empty mapping. - */ - -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - - return (emitter.events[emitter.events_head].event_type == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].event_type == yaml_MAPPING_END_EVENT) -} - -/* - * Check if the next node can be expressed as a simple key. 
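The check defined next reduces to a small heuristic; restated standalone below, the 128-byte cap and the multiline exclusion come straight from the deleted function, while the helper name and the single length argument are invented simplifications of its anchor+tag+value accounting:

    package main

    import "fmt"

    // canBeSimpleKey: a node may be written inline as "key: value" only when
    // it is a single-line scalar (or a provably empty collection) whose
    // rendered length is at most 128 bytes; otherwise the emitter falls back
    // to the explicit "? key" form.
    func canBeSimpleKey(renderedLen int, multiline bool) bool {
        return !multiline && renderedLen <= 128
    }

    func main() {
        fmt.Println(canBeSimpleKey(12, false)) // true
        fmt.Println(canBeSimpleKey(12, true))  // false
    }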
- */ - -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - - switch emitter.events[emitter.events_head].event_type { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - - default: - return false - } - - if length > 128 { - return false - } - - return true -} - -/* - * Determine an acceptable scalar style. - */ - -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, - "neither tag nor implicit flags are specified") - } - - style := yaml_scalar_style_t(event.style) - - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) || - (emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && - (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || - emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && - style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte("!") - } - - emitter.scalar_data.style = style - - return true -} - -/* - * Write an achor. - */ - -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - - indicator := "*" - if !emitter.anchor_data.alias { - indicator = "&" - } - if !yaml_emitter_write_indicator(emitter, []byte(indicator), true, false, false) { - return false - } - - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -/* - * Write a tag. 
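yaml_emitter_select_scalar_style above is essentially a chain of downgrades; a compressed paraphrase (field and helper names invented, literal/folded handling elided):

    package main

    import "fmt"

    // analysis mirrors the allow-flags yaml_emitter_analyze_scalar computes.
    type analysis struct {
        plainAllowed  bool
        singleAllowed bool
        multiline     bool
    }

    // chooseStyle: plain -> single-quoted -> double-quoted, each downgrade
    // triggered by what the analyzed scalar permits; canonical output and
    // multiline simple keys force double quoting outright.
    func chooseStyle(a analysis, canonical, simpleKey bool) string {
        if canonical || (simpleKey && a.multiline) {
            return "double"
        }
        style := "plain"
        if !a.plainAllowed {
            style = "single"
        }
        if style == "single" && !a.singleAllowed {
            style = "double"
        }
        return style
    }

    func main() {
        fmt.Println(chooseStyle(analysis{plainAllowed: true, singleAllowed: true}, false, false)) // plain
        fmt.Println(chooseStyle(analysis{singleAllowed: true}, false, false))                     // single
        fmt.Println(chooseStyle(analysis{}, true, false))                                         // double
    }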
- */ - -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - - } - } else { - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - - if !yaml_emitter_write_indicator(emitter, []byte(">"), false, false, false) { - return false - } - - } - - return true -} - -/* - * Write a scalar. - */ - -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, - emitter.scalar_data.value, - !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, - emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, - emitter.scalar_data.value) - - default: - panic("unknown scalar") - } - - return false -} - -/* - * Check if a %YAML directive is valid. - */ - -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, - version_directive yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, - "incompatible %YAML directive") - } - - return true -} - -/* - * Check if a %TAG directive is valid. - */ - -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, - tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must not be empty") - } - - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must start with '!'") - } - - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must end with '!'") - } - - for i := 1; i < len(handle)-1; width(handle[i]) { - if !is_alpha(handle[i]) { - return yaml_emitter_set_emitter_error(emitter, - "tag handle must contain alphanumerical characters only") - } - } - - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag prefix must not be empty") - } - - return true -} - -/* - * Check if an anchor is valid. 
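yaml_emitter_analyze_tag_directive above enforces the %TAG handle grammar: non-empty, delimited by '!', alphanumeric in between. A standalone restatement (helper name invented); note that the vendored loop's post statement appears to read width(handle[i]) without an i +=, so it would never advance, and the sketch below iterates explicitly instead:

    package main

    import (
        "errors"
        "fmt"
    )

    func validateTagHandle(handle string) error {
        if len(handle) == 0 {
            return errors.New("tag handle must not be empty")
        }
        if handle[0] != '!' || handle[len(handle)-1] != '!' {
            return errors.New("tag handle must start and end with '!'")
        }
        // Byte-wise walk; the real code steps by UTF-8 width, and its
        // is_alpha admits digits, ASCII letters, '_' and '-'.
        for i := 1; i < len(handle)-1; i++ {
            c := handle[i]
            ok := c == '_' || c == '-' ||
                ('0' <= c && c <= '9') || ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z')
            if !ok {
                return errors.New("tag handle must contain alphanumerical characters only")
            }
        }
        return nil
    }

    func main() {
        fmt.Println(validateTagHandle("!!"))      // <nil>
        fmt.Println(validateTagHandle("!my!"))    // <nil>
        fmt.Println(validateTagHandle("!bad c!")) // error: space not allowed
    }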
- */ - -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, - anchor []byte, alias bool) bool { - if len(anchor) == 0 { - errmsg := "alias value must not be empty" - if !alias { - errmsg = "anchor value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor[i]) { - errmsg := "alias value must contain alphanumerical characters only" - if !alias { - errmsg = "anchor value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, errmsg) - } - } - - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - - return true -} - -/* - * Check if a tag is valid. - */ - -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, - "tag value must not be empty") - } - - for i := range emitter.tag_directives { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - - emitter.tag_data.suffix = tag - - return true -} - -/* - * Check if a scalar is valid. - */ - -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - block_indicators := false - flow_indicators := false - line_breaks := false - special_characters := false - - leading_space := false - leading_break := false - trailing_space := false - trailing_break := false - break_space := false - space_break := false - - preceeded_by_whitespace := false - followed_by_whitespace := false - previous_space := false - previous_break := false - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || - (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceeded_by_whitespace = true - - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blankz_at(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceeded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if !is_printable_at(value, i) || (!is_ascii(value[i]) && !emitter.unicode) { - special_characters = true - } - - if is_break_at(value, i) { - line_breaks = true - } - - if is_space(value[i]) { - if i == 0 { - leading_space = true - } - if i+w == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break_at(value, i) { - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - preceeded_by_whitespace = is_blankz_at(value, i) - } - - emitter.scalar_data.multiline = line_breaks - - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if trailing_space { - emitter.scalar_data.block_allowed = false - } - - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - - if space_break || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - emitter.scalar_data.block_allowed = false - } - - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - - return true -} - -/* - * Check if the event data is valid. 
- */ - -func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - switch event.event_type { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - (!event.implicit && - !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, - event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || - !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, - event.tag) { - return false - } - } - - } - return true -} - -/* - * Write the BOM character. - */ - -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - - pos := emitter.buffer_pos - emitter.buffer[pos] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - - if !emitter.indention || emitter.column > indent || - (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - - emitter.whitespace = true - emitter.indention = true - - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, - indicator []byte, need_whitespace bool, - is_whitespace bool, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - ind_pos := 0 - for ind_pos < len(indicator) { - if !write(emitter, indicator, &ind_pos) { - return false - } - } - - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - pos := 0 - for pos < len(value) { - if !write(emitter, value, &pos) { - return false - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, - need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - write_it := false - switch value[i] { - case ';', '/', '?', 
':', '@', '&', '=', '+', '$', ',', '_', - '.', '!', '~', '*', '\'', '(', ')', '[', ']': - write_it = true - default: - write_it = is_alpha(value[i]) - } - if write_it { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for j := 0; j < w; j++ { - val := value[i] - i++ - - if !put(emitter, '%') { - return false - } - c := val >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = val & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - } - } - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - emitter.whitespace = false - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - spaces := false - breaks := false - - if !yaml_emitter_write_indicator(emitter, []byte("'"), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 && - !is_space(value[i+1]) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break_at(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("'"), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, - allow_breaks bool) bool { - - spaces := false - - if !yaml_emitter_write_indicator(emitter, []byte("\""), true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable_at(value, i) || (!emitter.unicode && !is_ascii(value[i])) || - is_bom_at(value, i) || is_break_at(value, i) || - value[i] == '"' || value[i] == '\\' { - octet := 
value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - switch v { - case 0x00: - if !put(emitter, '0') { - return false - } - case 0x07: - if !put(emitter, 'a') { - return false - } - case 0x08: - if !put(emitter, 'b') { - return false - } - case 0x09: - if !put(emitter, 't') { - return false - } - - case 0x0A: - if !put(emitter, 'n') { - return false - } - - case 0x0B: - if !put(emitter, 'v') { - return false - } - - case 0x0C: - if !put(emitter, 'f') { - return false - } - - case 0x0D: - if !put(emitter, 'r') { - return false - } - - case 0x1B: - if !put(emitter, 'e') { - return false - } - case 0x22: - if !put(emitter, '"') { - return false - } - case 0x5C: - if !put(emitter, '\\') { - return false - } - case 0x85: - if !put(emitter, 'N') { - return false - } - - case 0xA0: - if !put(emitter, '_') { - return false - } - - case 0x2028: - if !put(emitter, 'L') { - return false - } - - case 0x2029: - if !put(emitter, 'P') { - return false - } - default: - if v <= 0xFF { - if !put(emitter, 'x') { - return false - } - w = 2 - } else if v <= 0xFFFF { - if !put(emitter, 'u') { - return false - } - w = 4 - } else { - if !put(emitter, 'U') { - return false - } - w = 8 - } - for k := (w - 1) * 4; k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - c := digit + '0' - if c > 9 { - c = digit + 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - spaces = false - } else if is_space(value[i]) { - if allow_breaks && !spaces && - emitter.column > emitter.best_width && - i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value[i+1]) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - - if !yaml_emitter_write_indicator(emitter, []byte("\""), false, false, false) { - return false - } - - emitter.whitespace = false - emitter.indention = false - - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - - if is_space(value[0]) || is_break_at(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - - if !is_break_at(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - for value[i]&0xC0 == 0x80 { - i-- - } - - if is_break_at(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - - breaks := true - - if !yaml_emitter_write_indicator(emitter, []byte("|"), true, false, false) { - return false - } - - if 
!yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - breaks := true - leading_spaces := true - - if !yaml_emitter_write_indicator(emitter, []byte(">"), true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - return false - } - emitter.indention = true - emitter.whitespace = true - - for i := 0; i < len(value); { - if is_break_at(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := i - for is_break_at(value, k) { - k += width(value[k]) - } - if !is_blankz_at(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value[i]) - } - if !breaks && is_space(value[i]) && !is_space(value[i+1]) && - emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go deleted file mode 100644 index fd9918089..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go +++ /dev/null @@ -1,395 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "io" - "math" - "reflect" - "regexp" - "sort" - "strconv" - "time" -) - -var ( - timeTimeType = reflect.TypeOf(time.Time{}) - marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - numberType = reflect.TypeOf(Number("")) - nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]") - multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029") - - shortTags = map[string]string{ - yaml_NULL_TAG: "!!null", - yaml_BOOL_TAG: "!!bool", - yaml_STR_TAG: "!!str", - yaml_INT_TAG: "!!int", - yaml_FLOAT_TAG: "!!float", - yaml_TIMESTAMP_TAG: "!!timestamp", - yaml_SEQ_TAG: "!!seq", - yaml_MAP_TAG: "!!map", - yaml_BINARY_TAG: "!!binary", - } -) - -type Marshaler interface { - MarshalYAML() (tag string, value interface{}, err error) -} - -// An Encoder writes JSON objects to an output stream. 
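A hedged usage sketch of the API this hunk deletes (the doc comment above is stale: the Encoder emits YAML, not JSON). Marshal and NewEncoder are declared just below; the import path is the vendored one this patch removes, so this only runs against a tree that still vendors candiedyaml:

    package main

    import (
        "fmt"

        "github.com/cloudfoundry-incubator/candiedyaml"
    )

    func main() {
        // Marshal drives an Encoder over an in-memory buffer (see Marshal below).
        out, err := candiedyaml.Marshal(map[string]interface{}{
            "provider": "hypersh",
            "removed":  true,
        })
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // Expected output (emitMap emits map keys sorted):
        // provider: hypersh
        // removed: true
    }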
-type Encoder struct { - w io.Writer - emitter yaml_emitter_t - event yaml_event_t - flow bool - err error -} - -func Marshal(v interface{}) ([]byte, error) { - b := bytes.Buffer{} - e := NewEncoder(&b) - err := e.Encode(v) - return b.Bytes(), err -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{w: w} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, e.w) - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - - return e -} - -func (e *Encoder) Encode(v interface{}) (err error) { - defer recovery(&err) - - if e.err != nil { - return e.err - } - - e.marshal("", reflect.ValueOf(v), true) - - yaml_document_end_event_initialize(&e.event, true) - e.emit() - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() - - return nil -} - -func (e *Encoder) emit() { - if !yaml_emitter_emit(&e.emitter, &e.event) { - panic("bad emit") - } -} - -func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) { - vt := v.Type() - - if vt.Implements(marshalerType) { - e.emitMarshaler(tag, v) - return - } - - if vt.Kind() != reflect.Ptr && allowAddr { - if reflect.PtrTo(vt).Implements(marshalerType) { - e.emitAddrMarshaler(tag, v) - return - } - } - - switch v.Kind() { - case reflect.Interface: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), allowAddr) - } - case reflect.Map: - e.emitMap(tag, v) - case reflect.Ptr: - if v.IsNil() { - e.emitNil() - } else { - e.marshal(tag, v.Elem(), true) - } - case reflect.Struct: - e.emitStruct(tag, v) - case reflect.Slice: - e.emitSlice(tag, v) - case reflect.String: - e.emitString(tag, v) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.emitInt(tag, v) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.emitUint(tag, v) - case reflect.Float32, reflect.Float64: - e.emitFloat(tag, v) - case reflect.Bool: - e.emitBool(tag, v) - default: - panic("Can't marshal type yet: " + v.Type().String()) - } -} - -func (e *Encoder) emitMap(tag string, v reflect.Value) { - e.mapping(tag, func() { - var keys stringValues = v.MapKeys() - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k, true) - e.marshal("", v.MapIndex(k), true) - } - }) -} - -func (e *Encoder) emitStruct(tag string, v reflect.Value) { - if v.Type() == timeTimeType { - e.emitTime(tag, v) - return - } - - fields := cachedTypeFields(v.Type()) - - e.mapping(tag, func() { - for _, f := range fields { - fv := fieldByIndex(v, f.index) - if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) { - continue - } - - e.marshal("", reflect.ValueOf(f.name), true) - e.flow = f.flow - e.marshal("", fv, true) - } - }) -} - -func (e *Encoder) emitTime(tag string, v reflect.Value) { - t := v.Interface().(time.Time) - bytes, _ := t.MarshalText() - e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case 
reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func (e *Encoder) mapping(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - f() - - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitSlice(tag string, v reflect.Value) { - if v.Type() == byteSliceType { - e.emitBase64(tag, v) - return - } - - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - - n := v.Len() - for i := 0; i < n; i++ { - e.marshal("", v.Index(i), true) - } - - yaml_sequence_end_event_initialize(&e.event) - e.emit() -} - -func (e *Encoder) emitBase64(tag string, v reflect.Value) { - if v.IsNil() { - e.emitNil() - return - } - - s := v.Bytes() - - dst := make([]byte, base64.StdEncoding.EncodedLen(len(s))) - - base64.StdEncoding.Encode(dst, s) - e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE) -} - -func (e *Encoder) emitString(tag string, v reflect.Value) { - var style yaml_scalar_style_t - s := v.String() - - if nonPrintable.MatchString(s) { - e.emitBase64(tag, v) - return - } - - if v.Type() == numberType { - style = yaml_PLAIN_SCALAR_STYLE - } else { - event := yaml_event_t{ - implicit: true, - value: []byte(s), - } - - rtag, _ := resolveInterface(event, false) - if tag == "" && rtag != yaml_STR_TAG { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else if multiline.MatchString(s) { - style = yaml_LITERAL_SCALAR_STYLE - } else { - style = yaml_PLAIN_SCALAR_STYLE - } - } - - e.emitScalar(s, "", tag, style) -} - -func (e *Encoder) emitBool(tag string, v reflect.Value) { - s := strconv.FormatBool(v.Bool()) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitInt(tag string, v reflect.Value) { - s := strconv.FormatInt(v.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitUint(tag string, v reflect.Value) { - s := strconv.FormatUint(v.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitFloat(tag string, v reflect.Value) { - f := v.Float() - - var s string - switch { - case math.IsNaN(f): - s = ".nan" - case math.IsInf(f, 1): - s = "+.inf" - case math.IsInf(f, -1): - s = "-.inf" - default: - s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits()) - } - - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitNil() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) -} - -func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { - implicit := tag == "" - if !implicit { - style = yaml_PLAIN_SCALAR_STYLE - } - - stag := shortTags[tag] - if stag == "" { - stag = tag - } - - yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style) - e.emit() -} - -func (e *Encoder) emitMarshaler(tag string, v reflect.Value) { - if v.Kind() == reflect.Ptr && v.IsNil() { - e.emitNil() - return - } - - m := v.Interface().(Marshaler) - if m == nil { - e.emitNil() - return - } - t, val, err := m.MarshalYAML() - if err != nil { - panic(err) - } - if val == nil { - e.emitNil() - return - } - - e.marshal(t, reflect.ValueOf(val), false) -} - -func (e *Encoder) emitAddrMarshaler(tag string, v 
reflect.Value) { - if !v.CanAddr() { - e.marshal(tag, v, false) - return - } - - va := v.Addr() - if va.IsNil() { - e.emitNil() - return - } - - m := v.Interface().(Marshaler) - t, val, err := m.MarshalYAML() - if err != nil { - panic(err) - } - - if val == nil { - e.emitNil() - return - } - - e.marshal(t, reflect.ValueOf(val), false) -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go deleted file mode 100644 index 8d38e3065..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go +++ /dev/null @@ -1,1230 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -/* - * The parser implements the following grammar: - * - * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - * implicit_document ::= block_node DOCUMENT-END* - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * block_node_or_indentless_sequence ::= - * ALIAS - * | properties (block_content | indentless_block_sequence)? - * | block_content - * | indentless_block_sequence - * block_node ::= ALIAS - * | properties block_content? - * | block_content - * flow_node ::= ALIAS - * | properties flow_content? - * | flow_content - * properties ::= TAG ANCHOR? | ANCHOR TAG? - * block_content ::= block_collection | flow_collection | SCALAR - * flow_content ::= flow_collection | SCALAR - * block_collection ::= block_sequence | block_mapping - * flow_collection ::= flow_sequence | flow_mapping - * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - * block_mapping ::= BLOCK-MAPPING_START - * ((KEY block_node_or_indentless_sequence?)? - * (VALUE block_node_or_indentless_sequence?)?)* - * BLOCK-END - * flow_sequence ::= FLOW-SEQUENCE-START - * (flow_sequence_entry FLOW-ENTRY)* - * flow_sequence_entry? - * FLOW-SEQUENCE-END - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * flow_mapping ::= FLOW-MAPPING-START - * (flow_mapping_entry FLOW-ENTRY)* - * flow_mapping_entry? - * FLOW-MAPPING-END - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - */ - -/* - * Peek the next token in the token queue. - */ -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - return &parser.tokens[parser.tokens_head] - } - return nil -} - -/* - * Remove the next token from the queue (must be called after peek_token). - */ -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].token_type == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -/* - * Get the next event. - */ - -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - /* Erase the event object. */ - *event = yaml_event_t{} - - /* No events after the end of the stream or error. 
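A sketch (not part of the original file) of how this entry point is driven: yaml_parser_parse fills exactly one event per call, and the caller loops until a STREAM-END event or an error, using the internal types this package declares in yamlh.go:

    func drainEvents(parser *yaml_parser_t) bool {
        for {
            var event yaml_event_t
            if !yaml_parser_parse(parser, &event) {
                return false // parser.error and parser.problem describe the failure
            }
            if event.event_type == yaml_STREAM_END_EVENT {
                return true
            }
            // dispatch on event.event_type here: document, scalar,
            // sequence and mapping events arrive in stream order
        }
    }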
*/ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || - parser.state == yaml_PARSE_END_STATE { - return true - } - - /* Generate the next event. */ - - return yaml_parser_state_machine(parser, event) -} - -/* - * Set parser error. - */ - -func yaml_parser_set_parser_error(parser *yaml_parser_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, - context string, context_mark YAML_mark_t, - problem string, problem_mark YAML_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - - return false -} - -/* - * State dispatcher. - */ - -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return 
yaml_parser_parse_flow_mapping_value(parser, event, true) - } - - panic("invalid parser state") -} - -/* - * Parse the production: - * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END - * ************ - */ - -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, - "did not find expected <stream-start>", token.start_mark) - } - - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - event_type: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - - return true -} - -/* - * Parse the productions: - * implicit_document ::= block_node DOCUMENT-END* - * * - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * ************************* - */ - -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, - implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - /* Parse extra document end indicators. */ - - if !implicit { - for token.token_type == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - /* Parse an implicit document. */ - - if implicit && token.token_type != yaml_VERSION_DIRECTIVE_TOKEN && - token.token_type != yaml_TAG_DIRECTIVE_TOKEN && - token.token_type != yaml_DOCUMENT_START_TOKEN && - token.token_type != yaml_STREAM_END_TOKEN { - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - implicit: true, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - } else if token.token_type != yaml_STREAM_END_TOKEN { - /* Parse an explicit document. */ - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, - &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - - end_mark := token.end_mark - - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - } else { - /* Parse the stream end. */ - parser.state = yaml_PARSE_END_STATE - - *event = yaml_event_t{ - event_type: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - return true -} - -/* - * Parse the productions: - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
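Illustration only (not from the original file): the two input shapes the function above distinguishes. The first stream opens directly on a block node and yields an implicit DOCUMENT-START event; the second is made explicit by a directive and the '---' marker:

    const (
        implicitDoc = "key: value\n"
        explicitDoc = "%YAML 1.1\n---\nkey: value\n"
    )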
DOCUMENT-END* - * *********** - */ - -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN || - token.token_type == yaml_DOCUMENT_START_TOKEN || - token.token_type == yaml_DOCUMENT_END_TOKEN || - token.token_type == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } else { - return yaml_parser_parse_node(parser, event, true, false) - } -} - -/* - * Parse the productions: - * implicit_document ::= block_node DOCUMENT-END* - * ************* - * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* - * ************* - */ - -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - implicit := true - - token := peek_token(parser) - if token == nil { - return false - } - - start_mark, end_mark := token.start_mark, token.start_mark - - if token.token_type == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - event_type: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - - return true -} - -/* - * Parse the productions: - * block_node_or_indentless_sequence ::= - * ALIAS - * ***** - * | properties (block_content | indentless_block_sequence)? - * ********** * - * | block_content | indentless_block_sequence - * * - * block_node ::= ALIAS - * ***** - * | properties block_content? - * ********** * - * | block_content - * * - * flow_node ::= ALIAS - * ***** - * | properties flow_content? - * ********** * - * | flow_content - * * - * properties ::= TAG ANCHOR? | ANCHOR TAG? 
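The state restore seen twice above is the parser's universal return path. Written out as a helper for clarity (the deleted code always inlines it), it is a plain stack pop:

    func popState(parser *yaml_parser_t) {
        parser.state = parser.states[len(parser.states)-1]
        parser.states = parser.states[:len(parser.states)-1]
    }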
- * ************************* - * block_content ::= block_collection | flow_collection | SCALAR - * ****** - * flow_content ::= flow_collection | SCALAR - * ****** - */ - -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, - block bool, indentless_sequence bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - skip_token(parser) - return true - } else { - start_mark, end_mark := token.start_mark, token.start_mark - - var tag_handle []byte - var tag_suffix, anchor []byte - var tag_mark YAML_mark_t - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.token_type == yaml_TAG_TOKEN { - tag_handle = token.value - tag_suffix = token.suffix - start_mark, tag_mark = token.start_mark, token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - - } - } - - var tag []byte - if tag_handle != nil { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_handle = nil - tag_suffix = nil - } else { - for i := range parser.tag_directives { - tag_directive := &parser.tag_directives[i] - if bytes.Equal(tag_directive.handle, tag_handle) { - tag = append([]byte(nil), tag_directive.prefix...) - tag = append(tag, tag_suffix...) 
- tag_handle = nil - tag_suffix = nil - break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.token_type == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else { - if token.token_type == yaml_SCALAR_TOKEN { - plain_implicit := false - quoted_implicit := false - end_mark = token.end_mark - if (token.style == yaml_PLAIN_SCALAR_STYLE && len(tag) == 0) || - (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - - skip_token(parser) - return true - } else if token.token_type == yaml_FLOW_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - - return true - } else if token.token_type == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - - return true - } else if block && token.token_type == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } else if len(anchor) > 0 || len(tag) > 0 { - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } else { - msg := "while parsing a block node" - if !block { - msg = "while parsing a flow node" - } - yaml_parser_set_parser_error_context(parser, msg, start_mark, - "did not find 
expected node content", token.start_mark) - return false - } - } - } - - return false -} - -/* - * Parse the productions: - * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END - * ******************** *********** * ********* - */ - -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", mark, - "did not find expected '-' indicator", token.start_mark) - } -} - -/* - * Parse the productions: - * indentless_sequence ::= (BLOCK-ENTRY block_node?)+ - * *********** * - */ - -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_BLOCK_ENTRY_TOKEN && - token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - return true - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * ******************* - * ((KEY block_node_or_indentless_sequence?)? 
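Illustration only: inputs that reach the two sequence parsers above. A block sequence is bracketed by BLOCK-SEQUENCE-START/BLOCK-END tokens, while an indentless sequence sits at the same indentation as its mapping key, so it has BLOCK-ENTRY tokens but no START/END pair:

    const (
        blockSeq      = "items:\n  - one\n  - two\n"
        indentlessSeq = "items:\n- one\n- two\n"
    )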
- * *** * - * (VALUE block_node_or_indentless_sequence?)?)* - * - * BLOCK-END - * ********* - */ - -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.token_type == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - return true - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", mark, - "did not find expected key", token.start_mark) - } -} - -/* - * Parse the productions: - * block_mapping ::= BLOCK-MAPPING_START - * - * ((KEY block_node_or_indentless_sequence?)? - * - * (VALUE block_node_or_indentless_sequence?)?)* - * ***** * - * BLOCK-END - * - */ - -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_KEY_TOKEN && - token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } -} - -/* - * Parse the productions: - * flow_sequence ::= FLOW-SEQUENCE-START - * ******************* - * (flow_sequence_entry FLOW-ENTRY)* - * * ********** - * flow_sequence_entry? - * * - * FLOW-SEQUENCE-END - * ***************** - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
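Illustration only: a mapping that exercises the empty-scalar branches above. Key b has no value, so yaml_parser_process_empty_scalar supplies a null scalar instead of raising an error:

    const sparseMapping = "a: 1\nb:\n"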
- * * - */ - -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - - skip_token(parser) - return true - } else if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - event_type: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * *** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * ***** * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Parse the productions: - * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
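Illustration only: the KEY branch above is what allows a bare "k: v" pair inside a flow sequence; it becomes an implicit single-pair mapping between the plain entries:

    const flowSeqPair = "[one, k: v, two]\n"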
- * * - */ - -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, - event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, - } - - return true -} - -/* - * Parse the productions: - * flow_mapping ::= FLOW-MAPPING-START - * ****************** - * (flow_mapping_entry FLOW-ENTRY)* - * * ********** - * flow_mapping_entry? - * ****************** - * FLOW-MAPPING-END - * **************** - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - * * *** * - */ - -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, - event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.token_type == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.token_type == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_VALUE_TOKEN && - token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - } else if token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - event_type: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true -} - -/* - * Parse the productions: - * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
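Illustration only: a flow mapping exercising both value paths. Key a takes the normal VALUE branch, while bare b is parsed as a key and routed through the EMPTY_VALUE state handled just below, resolving its value to null:

    const flowMap = "{a: 1, b}\n"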
- * * ***** * - */ - -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, - event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - - if token.token_type == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.token_type != yaml_FLOW_ENTRY_TOKEN && - token.token_type != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -/* - * Generate an empty scalar event. - */ - -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, - mark YAML_mark_t) bool { - *event = yaml_event_t{ - event_type: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - - return true -} - -/* - * Parse directives. - */ - -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - for token.token_type == yaml_VERSION_DIRECTIVE_TOKEN || - token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || - token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.token_type == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - - if !yaml_parser_append_tag_directive(parser, value, false, - token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - - return true -} - -/* - * Append a tag directive to the directives stack. 
- */ - -func yaml_parser_append_tag_directive(parser *yaml_parser_t, - value yaml_tag_directive_t, allow_duplicates bool, mark YAML_mark_t) bool { - for i := range parser.tag_directives { - tag := &parser.tag_directives[i] - if bytes.Equal(value.handle, tag.handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - parser.tag_directives = append(parser.tag_directives, value) - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go deleted file mode 100644 index 5631da2dc..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go +++ /dev/null @@ -1,465 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "io" -) - -/* - * Set the reader error and return 0. - */ - -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, - offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - - return false -} - -/* - * Byte order marks. - */ -const ( - BOM_UTF8 = "\xef\xbb\xbf" - BOM_UTF16LE = "\xff\xfe" - BOM_UTF16BE = "\xfe\xff" -) - -/* - * Determine the input stream encoding by checking the BOM symbol. If no BOM is - * found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. - */ - -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - /* Ensure that we had enough bytes in the raw buffer. */ - for !parser.eof && - len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - /* Determine the encoding. */ - raw := parser.raw_buffer - pos := parser.raw_buffer_pos - remaining := len(raw) - pos - if remaining >= 2 && - raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 2 && - raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if remaining >= 3 && - raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - - return true -} - -/* - * Update the raw buffer. - */ - -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - /* Return if the raw buffer is full. */ - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - /* Return on EOF. */ - - if parser.eof { - return true - } - - /* Move the remaining bytes in the raw buffer to the beginning. 
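A sketch (not part of the original file) of the BOM dispatch performed by yaml_parser_determine_encoding above, using the same constants; when no BOM matches, UTF-8 is assumed:

    func sniffEncoding(raw []byte) yaml_encoding_t {
        switch {
        case len(raw) >= 2 && string(raw[:2]) == BOM_UTF16LE:
            return yaml_UTF16LE_ENCODING
        case len(raw) >= 2 && string(raw[:2]) == BOM_UTF16BE:
            return yaml_UTF16BE_ENCODING
        case len(raw) >= 3 && string(raw[:3]) == BOM_UTF8:
            return yaml_UTF8_ENCODING
        }
        return yaml_UTF8_ENCODING // no BOM: default to UTF-8
    }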
*/ - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - /* Call the read handler to fill the buffer. */ - size_read, err := parser.read_handler(parser, - parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), - parser.offset, -1) - } - - return true -} - -/* - * Ensure that the buffer contains at least `length` characters. - * Return 1 on success, 0 on failure. - * - * The length is supposed to be significantly less that the buffer size. - */ - -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - /* Read handler must be set. */ - if parser.read_handler == nil { - panic("read handler must be set") - } - - /* If the EOF flag is set and the raw buffer is empty, do nothing. */ - - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true - } - - /* Return if the buffer contains enough characters. */ - - if parser.unread >= length { - return true - } - - /* Determine the input encoding if it is not known yet. */ - - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - /* Move the unread characters to the beginning of the buffer. */ - buffer_end := len(parser.buffer) - if 0 < parser.buffer_pos && - parser.buffer_pos < buffer_end { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_end -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_end { - buffer_end = 0 - parser.buffer_pos = 0 - } - - parser.buffer = parser.buffer[:cap(parser.buffer)] - - /* Fill the buffer until it has enough characters. */ - first := true - for parser.unread < length { - /* Fill the raw buffer if necessary. */ - - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_end] - return false - } - } - first = false - - /* Decode the raw buffer. */ - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var w int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - incomplete := false - - /* Decode the next character. */ - - switch parser.encoding { - case yaml_UTF8_ENCODING: - - /* - * Decode a UTF-8 character. Check RFC 3629 - * (http://www.ietf.org/rfc/rfc3629.txt) for more details. - * - * The following table (taken from the RFC) is used for - * decoding. - * - * Char. number range | UTF-8 octet sequence - * (hexadecimal) | (binary) - * --------------------+------------------------------------ - * 0000 0000-0000 007F | 0xxxxxxx - * 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - * 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - * 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - * - * Additionally, the characters in the range 0xD800-0xDFFF - * are prohibited as they are reserved for use with UTF-16 - * surrogate pairs. - */ - - /* Determine the length of the UTF-8 sequence. */ - - octet := parser.raw_buffer[parser.raw_buffer_pos] - w = width(octet) - - /* Check if the leading octet is valid. 
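The width() helper the decoder leans on here is defined elsewhere in this vendored package; assuming that, it is the standard RFC 3629 length table, mirroring the leading-octet switch that follows:

    func utf8Width(octet byte) int {
        switch {
        case octet&0x80 == 0x00:
            return 1 // 0xxxxxxx
        case octet&0xE0 == 0xC0:
            return 2 // 110xxxxx
        case octet&0xF0 == 0xE0:
            return 3 // 1110xxxx
        case octet&0xF8 == 0xF0:
            return 4 // 11110xxx
        }
        return 0 // invalid leading octet
    }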
*/ - - if w == 0 { - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - /* Check if the raw buffer contains an incomplete character. */ - - if w > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - incomplete = true - break - } - - /* Decode the leading octet. */ - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - /* Check and decode the trailing octets. */ - - for k := 1; k < w; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - /* Check if the octet is valid. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - /* Decode the octet. */ - - value = (value << 6) + rune(octet&0x3F) - } - - /* Check the length of the sequence against the value. */ - switch { - case w == 1: - case w == 2 && value >= 0x80: - case w == 3 && value >= 0x800: - case w == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - /* Check the range of the value. */ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - case yaml_UTF16LE_ENCODING, - yaml_UTF16BE_ENCODING: - - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - /* - * The UTF-16 encoding is not as simple as one might - * naively think. Check RFC 2781 - * (http://www.ietf.org/rfc/rfc2781.txt). - * - * Normally, two subsequent bytes describe a Unicode - * character. However a special technique (called a - * surrogate pair) is used for specifying character - * values larger than 0xFFFF. - * - * A surrogate pair consists of two pseudo-characters: - * high surrogate area (0xD800-0xDBFF) - * low surrogate area (0xDC00-0xDFFF) - * - * The following formulas are used for decoding - * and encoding characters using surrogate pairs: - * - * U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - * U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - * W1 = 110110yyyyyyyyyy - * W2 = 110111xxxxxxxxxx - * - * where U is the character value, W1 is the high surrogate - * area, W2 is the low surrogate area. - */ - - /* Check for incomplete UTF-16 character. */ - - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the character. */ - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - /* Check for unexpected low surrogate area. */ - - if (value & 0xFC00) == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - /* Check for a high surrogate area. */ - - if (value & 0xFC00) == 0xD800 { - - w = 4 - - /* Check for incomplete surrogate pair. */ - - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - incomplete = true - break - } - - /* Get the next character. 
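The surrogate-pair arithmetic applied just below, isolated as a sketch: the high surrogate (0xD800-0xDBFF) contributes ten bits, the low surrogate (0xDC00-0xDFFF) the other ten, and the result is offset by 0x10000:

    func combineSurrogates(w1, w2 rune) rune {
        return 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
    }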
*/ - - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - /* Check for a low surrogate area. */ - - if (value2 & 0xFC00) != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - /* Generate the value of the surrogate pair. */ - - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - w = 2 - } - - break - - default: - panic("Impossible") /* Impossible. */ - } - - /* Check if the raw buffer contains enough bytes to form a character. */ - - if incomplete { - break - } - - /* - * Check if the character is in the allowed range: - * #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - * | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - * | [#x10000-#x10FFFF] (32 bit) - */ - - if !(value == 0x09 || value == 0x0A || value == 0x0D || - (value >= 0x20 && value <= 0x7E) || - (value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) || - (value >= 0xE000 && value <= 0xFFFD) || - (value >= 0x10000 && value <= 0x10FFFF)) { - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - /* Move the raw pointers. */ - - parser.raw_buffer_pos += w - parser.offset += w - - /* Finally put the character into the buffer. */ - - /* 0000 0000-0000 007F . 0xxxxxxx */ - if value <= 0x7F { - parser.buffer[buffer_end] = byte(value) - } else if value <= 0x7FF { - /* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F)) - } else if value <= 0xFFFF { - /* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F)) - } else { - /* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ - parser.buffer[buffer_end] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F)) - } - - buffer_end += w - parser.unread++ - } - - /* On EOF, put NUL into the buffer and return. */ - - if parser.eof { - parser.buffer[buffer_end] = 0 - buffer_end++ - parser.buffer = parser.buffer[:buffer_end] - parser.unread++ - return true - } - - } - - parser.buffer = parser.buffer[:buffer_end] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go deleted file mode 100644 index fb9e8be89..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go +++ /dev/null @@ -1,449 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package candiedyaml - -import ( - "bytes" - "encoding/base64" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -var byteSliceType = reflect.TypeOf([]byte(nil)) - -var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)} -var bool_values map[string]bool -var null_values map[string]bool - -var signs = []byte{'-', '+'} -var nulls = []byte{'~', 'n', 'N'} -var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'} - -var timestamp_regexp *regexp.Regexp -var ymd_regexp *regexp.Regexp - -func init() { - bool_values = make(map[string]bool) - bool_values["y"] = true - bool_values["yes"] = true - bool_values["n"] = false - bool_values["no"] = false - bool_values["true"] = true - bool_values["false"] = false - bool_values["on"] = true - bool_values["off"] = false - - null_values = make(map[string]bool) - null_values["~"] = true - null_values["null"] = true - null_values["Null"] = true - null_values["NULL"] = true - - timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$") - ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$") -} - -func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) { - val := string(event.value) - - if null_values[val] { - v.Set(reflect.Zero(v.Type())) - return yaml_NULL_TAG, nil - } - - switch v.Kind() { - case reflect.String: - if useNumber && v.Type() == numberType { - tag, i := resolveInterface(event, useNumber) - if n, ok := i.(Number); ok { - v.Set(reflect.ValueOf(n)) - return tag, nil - } - return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark) - } - - return resolve_string(val, v, event) - case reflect.Bool: - return resolve_bool(val, v, event) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return resolve_int(val, v, useNumber, event) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return resolve_uint(val, v, useNumber, event) - case reflect.Float32, reflect.Float64: - return resolve_float(val, v, useNumber, event) - case reflect.Interface: - _, i := resolveInterface(event, useNumber) - if i != nil { - v.Set(reflect.ValueOf(i)) - } else { - v.Set(reflect.Zero(v.Type())) - } - - case reflect.Struct: - return resolve_time(val, v, event) - case reflect.Slice: - if v.Type() != byteSliceType { - return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark) - } - b, err := decode_binary(event.value, event) - if err != nil { - return "", err - } - - v.Set(reflect.ValueOf(b)) - default: - return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark) - } - - return yaml_STR_TAG, nil -} - -func hasBinaryTag(event yaml_event_t) bool { - for _, tag := range binary_tags { - if bytes.Equal(event.tag, tag) { - return true - } - } - return false -} - -func decode_binary(value []byte, event yaml_event_t) ([]byte, error) { - b := make([]byte, base64.StdEncoding.DecodedLen(len(value))) - n, err := base64.StdEncoding.Decode(b, value) - if err != nil { - return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark) - } - return b[:n], nil -} - -func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) { - if len(event.tag) > 0 { - if hasBinaryTag(event) { - b, err := decode_binary(event.value, 
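The tables and regular expressions initialized above drive YAML 1.1 implicit typing: an untagged plain scalar resolves to !!null, !!bool, !!int, !!float, a timestamp, or falls back to !!str. A simplified standalone sketch of those rules (classify is an illustrative name; the real resolve() dispatches on the target's reflect.Kind as shown above):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // classify applies the YAML 1.1 resolution order: nulls first, then
    // booleans (including y/yes/on and their negatives), then numbers.
    func classify(val string) string {
        switch strings.ToLower(val) {
        case "", "~", "null":
            return "!!null"
        case "y", "yes", "on", "true":
            return "!!bool (true)"
        case "n", "no", "off", "false":
            return "!!bool (false)"
        }
        plain := strings.Replace(val, "_", "", -1)
        if _, err := strconv.ParseInt(plain, 0, 64); err == nil {
            return "!!int"
        }
        if _, err := strconv.ParseFloat(plain, 64); err == nil {
            return "!!float"
        }
        return "!!str"
    }

    func main() {
        for _, v := range []string{"~", "on", "12_345", "3.14", "hello"} {
            fmt.Printf("%-8q -> %s\n", v, classify(v))
        }
    }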
event) - if err != nil { - return "", err - } - val = string(b) - } - } - v.SetString(val) - return yaml_STR_TAG, nil -} - -func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) { - b, found := bool_values[strings.ToLower(val)] - if !found { - return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark) - } - - v.SetBool(b) - return yaml_BOOL_TAG, nil -} - -func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - sign := int64(1) - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - var val64 int64 - if value <= math.MaxInt64 { - val64 = int64(value) - if sign == -1 { - val64 = -val64 - } - } else if sign == -1 && value == uint64(math.MaxInt64)+1 { - val64 = math.MinInt64 - } else { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatInt(val64, 10)) - } else { - if v.OverflowInt(val64) { - return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark) - } - v.SetInt(val64) - } - - return yaml_INT_TAG, nil -} - -func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - original := val - val = strings.Replace(val, "_", "", -1) - var value uint64 - - isNumberValue := v.Type() == numberType - - if val[0] == '-' { - return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark) - } - - if val[0] == '+' { - val = val[1:] - } - - base := 0 - if val == "0" { - if isNumberValue { - v.SetString("0") - } else { - v.Set(reflect.Zero(v.Type())) - } - - return yaml_INT_TAG, nil - } - - if strings.HasPrefix(val, "0o") { - base = 8 - val = val[2:] - } - - value, err := strconv.ParseUint(val, base, 64) - if err != nil { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - if isNumberValue { - v.SetString(strconv.FormatUint(value, 10)) - } else { - if v.OverflowUint(value) { - return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark) - } - - v.SetUint(value) - } - - return yaml_INT_TAG, nil -} - -func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) { - val = strings.Replace(val, "_", "", -1) - var value float64 - - isNumberValue := v.Type() == numberType - typeBits := 64 - if !isNumberValue { - typeBits = v.Type().Bits() - } - - sign := 1 - if val[0] == '-' { - sign = -1 - val = val[1:] - } else if val[0] == '+' { - val = val[1:] - } - - valLower := strings.ToLower(val) - if valLower == ".inf" { - value = math.Inf(sign) - } else if valLower == ".nan" { - value = math.NaN() - } else { - var err error - value, err = strconv.ParseFloat(val, typeBits) - value *= float64(sign) - - if err != nil { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - } - - if isNumberValue { - v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits)) - } else { - if 
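resolve_int above accepts '_' digit separators and a '0o' octal prefix, and it parses the magnitude through a uint64 so that math.MinInt64 (whose absolute value does not fit in an int64) still round-trips. The same steps in a standalone sketch (parseYAMLInt is an illustrative stand-in, not the vendored API):

    package main

    import (
        "fmt"
        "math"
        "strconv"
        "strings"
    )

    // parseYAMLInt strips separators, peels the sign, maps "0o" to base 8,
    // and parses the magnitude as a uint64 before applying the sign.
    func parseYAMLInt(s string) (int64, error) {
        s = strings.Replace(s, "_", "", -1)
        neg := false
        if len(s) > 0 && (s[0] == '-' || s[0] == '+') {
            neg = s[0] == '-'
            s = s[1:]
        }
        base := 0 // base 0 lets ParseUint also handle 0x... forms
        if strings.HasPrefix(s, "0o") {
            base, s = 8, s[2:]
        }
        u, err := strconv.ParseUint(s, base, 64)
        if err != nil {
            return 0, err
        }
        switch {
        case !neg && u > math.MaxInt64:
            return 0, fmt.Errorf("integer overflow: %s", s)
        case neg && u == 1<<63: // exactly |MinInt64|
            return math.MinInt64, nil
        case neg && u > 1<<63:
            return 0, fmt.Errorf("integer underflow: -%s", s)
        case neg:
            return -int64(u), nil
        }
        return int64(u), nil
    }

    func main() {
        for _, s := range []string{"1_000", "0o777", "-9223372036854775808"} {
            v, err := parseYAMLInt(s)
            fmt.Println(s, "->", v, err)
        }
    }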
v.OverflowFloat(value) { - return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark) - } - - v.SetFloat(value) - } - - return yaml_FLOAT_TAG, nil -} - -func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) { - var parsedTime time.Time - matches := ymd_regexp.FindStringSubmatch(val) - if len(matches) > 0 { - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) - } else { - matches = timestamp_regexp.FindStringSubmatch(val) - if len(matches) == 0 { - return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark) - } - - year, _ := strconv.Atoi(matches[1]) - month, _ := strconv.Atoi(matches[2]) - day, _ := strconv.Atoi(matches[3]) - hour, _ := strconv.Atoi(matches[4]) - min, _ := strconv.Atoi(matches[5]) - sec, _ := strconv.Atoi(matches[6]) - - nsec := 0 - if matches[7] != "" { - millis, _ := strconv.Atoi(matches[7]) - nsec = int(time.Duration(millis) * time.Millisecond) - } - - loc := time.UTC - if matches[8] != "" { - sign := matches[8][0] - hr, _ := strconv.Atoi(matches[8][1:]) - min := 0 - if matches[9] != "" { - min, _ = strconv.Atoi(matches[9]) - } - - zoneOffset := (hr*60 + min) * 60 - if sign == '-' { - zoneOffset = -zoneOffset - } - - loc = time.FixedZone("", zoneOffset) - } - parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc) - } - - v.Set(reflect.ValueOf(parsedTime)) - return "", nil -} - -func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) { - val := string(event.value) - if len(event.tag) == 0 && !event.implicit { - return "", val - } - - if len(val) == 0 { - return yaml_NULL_TAG, nil - } - - var result interface{} - - sign := false - c := val[0] - switch { - case bytes.IndexByte(signs, c) != -1: - sign = true - fallthrough - case c >= '0' && c <= '9': - i := int64(0) - result = &i - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_int(val, v, useNumber, event); err == nil { - return yaml_INT_TAG, v.Interface() - } - - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v = reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - - if !sign { - t := time.Time{} - if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil { - return "", t - } - } - case bytes.IndexByte(nulls, c) != -1: - if null_values[val] { - return yaml_NULL_TAG, nil - } - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - case c == '.': - f := float64(0) - result = &f - if useNumber { - var n Number - result = &n - } - - v := reflect.ValueOf(result).Elem() - if _, err := resolve_float(val, v, useNumber, event); err == nil { - return yaml_FLOAT_TAG, v.Interface() - } - case bytes.IndexByte(bools, c) != -1: - b := false - if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil { - return yaml_BOOL_TAG, b - } - } - - if hasBinaryTag(event) { - bytes, err := decode_binary(event.value, event) - if err == nil { - return yaml_BINARY_TAG, bytes - } - } - - return yaml_STR_TAG, val -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go deleted file mode 100644 index 
25c29816e..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "os" -) - -func Run_parser(cmd string, args []string) { - for i := 0; i < len(args); i++ { - fmt.Printf("[%d] Scanning '%s'\n", i, args[i]) - file, err := os.Open(args[i]) - if err != nil { - panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error())) - } - - parser := yaml_parser_t{} - yaml_parser_initialize(&parser) - yaml_parser_set_input_reader(&parser, file) - - failed := false - token := yaml_token_t{} - count := 0 - for { - if !yaml_parser_scan(&parser, &token) { - failed = true - break - } - - if token.token_type == yaml_STREAM_END_TOKEN { - break - } - count++ - } - - file.Close() - - msg := "SUCCESS" - if failed { - msg = "FAILED" - if parser.error != yaml_NO_ERROR { - m := parser.problem_mark - fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n", - parser.context, parser.problem, m.line, m.column) - } - } - fmt.Printf("%s (%d tokens)\n", msg, count) - } -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go deleted file mode 100644 index 5c080a063..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go +++ /dev/null @@ -1,3318 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "bytes" -) - -/* - * Introduction - * ************ - * - * The following notes assume that you are familiar with the YAML specification - * (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in - * some cases we are less restrictive than it requires. - * - * The process of transforming a YAML stream into a sequence of events is - * divided into two steps: Scanning and Parsing. - * - * The Scanner transforms the input stream into a sequence of tokens, while the - * Parser transforms the sequence of tokens produced by the Scanner into a - * sequence of parsing events. - * - * The Scanner is rather clever and complicated. The Parser, on the contrary, - * is a straightforward implementation of a recursive-descent parser (or an - * LL(1) parser, as it is usually called). - * - * Actually there are two aspects of Scanning that might be called "clever"; the - * rest is quite straightforward. These are "block collection start" and - * "simple keys". Both are explained below in detail. - * - * Here the Scanning step is explained and implemented.
We start with the list - * of all the tokens produced by the Scanner together with short descriptions. - * - * Now, tokens: - * - * STREAM-START(encoding) # The stream start. - * STREAM-END # The stream end. - * VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. - * TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. - * DOCUMENT-START # '---' - * DOCUMENT-END # '...' - * BLOCK-SEQUENCE-START # Indentation increase denoting a block - * BLOCK-MAPPING-START # sequence or a block mapping. - * BLOCK-END # Indentation decrease. - * FLOW-SEQUENCE-START # '[' - * FLOW-SEQUENCE-END # ']' - * FLOW-MAPPING-START # '{' - * FLOW-MAPPING-END # '}' - * BLOCK-ENTRY # '-' - * FLOW-ENTRY # ',' - * KEY # '?' or nothing (simple keys). - * VALUE # ':' - * ALIAS(anchor) # '*anchor' - * ANCHOR(anchor) # '&anchor' - * TAG(handle,suffix) # '!handle!suffix' - * SCALAR(value,style) # A scalar. - * - * The following two tokens are "virtual" tokens denoting the beginning and the - * end of the stream: - * - * STREAM-START(encoding) - * STREAM-END - * - * We pass the information about the input stream encoding with the - * STREAM-START token. - * - * The next two tokens are responsible for directives: - * - * VERSION-DIRECTIVE(major,minor) - * TAG-DIRECTIVE(handle,prefix) - * - * Example: - * - * %YAML 1.1 - * %TAG ! !foo - * %TAG !yaml! tag:yaml.org,2002: - * --- - * - * The corresponding sequence of tokens: - * - * STREAM-START(utf-8) - * VERSION-DIRECTIVE(1,1) - * TAG-DIRECTIVE("!","!foo") - * TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") - * DOCUMENT-START - * STREAM-END - * - * Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole - * line. - * - * The document start and end indicators are represented by: - * - * DOCUMENT-START - * DOCUMENT-END - * - * Note that if a YAML stream contains an implicit document (without '---' - * and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be - * produced. - * - * In the following examples, we present whole documents together with the - * produced tokens. - * - * 1. An implicit document: - * - * 'a scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * STREAM-END - * - * 2. An explicit document: - * - * --- - * 'a scalar' - * ... - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * SCALAR("a scalar",single-quoted) - * DOCUMENT-END - * STREAM-END - * - * 3. Several documents in a stream: - * - * 'a scalar' - * --- - * 'another scalar' - * --- - * 'yet another scalar' - * - * Tokens: - * - * STREAM-START(utf-8) - * SCALAR("a scalar",single-quoted) - * DOCUMENT-START - * SCALAR("another scalar",single-quoted) - * DOCUMENT-START - * SCALAR("yet another scalar",single-quoted) - * STREAM-END - * - * We have already introduced the SCALAR token above. The following tokens are - * used to describe aliases, anchors, tags, and scalars: - * - * ALIAS(anchor) - * ANCHOR(anchor) - * TAG(handle,suffix) - * SCALAR(value,style) - * - * The following series of examples illustrates the usage of these tokens: - * - * 1. A recursive sequence: - * - * &A [ *A ] - * - * Tokens: - * - * STREAM-START(utf-8) - * ANCHOR("A") - * FLOW-SEQUENCE-START - * ALIAS("A") - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A tagged scalar: - * - * !!float "3.14" # A good approximation. - * - * Tokens: - * - * STREAM-START(utf-8) - * TAG("!!","float") - * SCALAR("3.14",double-quoted) - * STREAM-END - * - * 3. Various scalar styles: - * - * --- # Implicit empty plain scalars do not produce tokens.
- * --- a plain scalar - * --- 'a single-quoted scalar' - * --- "a double-quoted scalar" - * --- |- - * a literal scalar - * --- >- - * a folded - * scalar - * - * Tokens: - * - * STREAM-START(utf-8) - * DOCUMENT-START - * DOCUMENT-START - * SCALAR("a plain scalar",plain) - * DOCUMENT-START - * SCALAR("a single-quoted scalar",single-quoted) - * DOCUMENT-START - * SCALAR("a double-quoted scalar",double-quoted) - * DOCUMENT-START - * SCALAR("a literal scalar",literal) - * DOCUMENT-START - * SCALAR("a folded scalar",folded) - * STREAM-END - * - * Now it's time to review collection-related tokens. We will start with - * flow collections: - * - * FLOW-SEQUENCE-START - * FLOW-SEQUENCE-END - * FLOW-MAPPING-START - * FLOW-MAPPING-END - * FLOW-ENTRY - * KEY - * VALUE - * - * The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and - * FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' - * respectively. FLOW-ENTRY represents the ',' indicator. Finally, the - * indicators '?' and ':', which are used for denoting mapping keys and values, - * are represented by the KEY and VALUE tokens. - * - * The following examples show flow collections: - * - * 1. A flow sequence: - * - * [item 1, item 2, item 3] - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-SEQUENCE-START - * SCALAR("item 1",plain) - * FLOW-ENTRY - * SCALAR("item 2",plain) - * FLOW-ENTRY - * SCALAR("item 3",plain) - * FLOW-SEQUENCE-END - * STREAM-END - * - * 2. A flow mapping: - * - * { - * a simple key: a value, # Note that the KEY token is produced. - * ? a complex key: another value, - * } - * - * Tokens: - * - * STREAM-START(utf-8) - * FLOW-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * FLOW-ENTRY - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * FLOW-ENTRY - * FLOW-MAPPING-END - * STREAM-END - * - * A simple key is a key which is not denoted by the '?' indicator. Note that - * the Scanner still produces the KEY token whenever it encounters a simple key. - * - * For scanning block collections, the following tokens are used (note that we - * repeat KEY and VALUE here): - * - * BLOCK-SEQUENCE-START - * BLOCK-MAPPING-START - * BLOCK-END - * BLOCK-ENTRY - * KEY - * VALUE - * - * The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the indentation - * increase that precedes a block collection (cf. the INDENT token in Python). - * The token BLOCK-END denotes the indentation decrease that ends a block collection - * (cf. the DEDENT token in Python). However, YAML has some syntax peculiarities - * that make detecting these tokens more complex. - * - * The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators - * '-', '?', and ':' respectively. - * - * The following examples show how the tokens BLOCK-SEQUENCE-START, - * BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: - * - * 1. 
Block sequences: - * - * - item 1 - * - item 2 - * - - * - item 3.1 - * - item 3.2 - * - - * key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 3.1",plain) - * BLOCK-ENTRY - * SCALAR("item 3.2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Block mappings: - * - * a simple key: a value # The KEY token is produced here. - * ? a complex key - * : another value - * a mapping: - * key 1: value 1 - * key 2: value 2 - * a sequence: - * - item 1 - * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a simple key",plain) - * VALUE - * SCALAR("a value",plain) - * KEY - * SCALAR("a complex key",plain) - * VALUE - * SCALAR("another value",plain) - * KEY - * SCALAR("a mapping",plain) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML does not always require to start a new block collection from a new - * line. If the current line contains only '-', '?', and ':' indicators, a new - * block collection may start at the current line. The following examples - * illustrate this case: - * - * 1. Collections in a sequence: - * - * - - item 1 - * - item 2 - * - key 1: value 1 - * key 2: value 2 - * - ? complex key - * : complex value - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-ENTRY - * BLOCK-MAPPING-START - * KEY - * SCALAR("complex key") - * VALUE - * SCALAR("complex value") - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * 2. Collections in a mapping: - * - * ? a sequence - * : - item 1 - * - item 2 - * ? a mapping - * : key 1: value 1 - * key 2: value 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("a sequence",plain) - * VALUE - * BLOCK-SEQUENCE-START - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - * KEY - * SCALAR("a mapping",plain) - * VALUE - * BLOCK-MAPPING-START - * KEY - * SCALAR("key 1",plain) - * VALUE - * SCALAR("value 1",plain) - * KEY - * SCALAR("key 2",plain) - * VALUE - * SCALAR("value 2",plain) - * BLOCK-END - * BLOCK-END - * STREAM-END - * - * YAML also permits non-indented sequences if they are included into a block - * mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: - * - * key: - * - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
- * - item 2 - * - * Tokens: - * - * STREAM-START(utf-8) - * BLOCK-MAPPING-START - * KEY - * SCALAR("key",plain) - * VALUE - * BLOCK-ENTRY - * SCALAR("item 1",plain) - * BLOCK-ENTRY - * SCALAR("item 2",plain) - * BLOCK-END - */ - -/* - * Ensure that the buffer contains the required number of characters. - * Return 1 on success, 0 on failure (reader error or memory error). - */ -func cache(parser *yaml_parser_t, length int) bool { - if parser.unread >= length { - return true - } - - return yaml_parser_update_buffer(parser, length) -} - -/* - * Advance the buffer pointer. - */ -func skip(parser *yaml_parser_t) { - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf_at(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - } else if is_break_at(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - } -} - -/* - * Copy a character to a string buffer and advance pointers. - */ - -func read(parser *yaml_parser_t, s []byte) []byte { - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -/* - * Copy a line break character to a string buffer and advance pointers. - */ -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - if buf[pos] == '\r' && buf[pos+1] == '\n' { - /* CR LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - } else if buf[pos] == '\r' || buf[pos] == '\n' { - /* CR|LF . LF */ - s = append(s, '\n') - parser.buffer_pos += 1 - } else if buf[pos] == '\xC2' && buf[pos+1] == '\x85' { - /* NEL . LF */ - s = append(s, '\n') - parser.buffer_pos += 2 - } else if buf[pos] == '\xE2' && buf[pos+1] == '\x80' && - (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9') { - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - } else { - return s - } - - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - return s -} - -/* - * Get the next token. - */ - -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Erase the token object. */ - *token = yaml_token_t{} - - /* No tokens after STREAM-END or error. */ - - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - /* Ensure that the tokens queue contains enough tokens. */ - - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - /* Fetch the next token from the queue. */ - - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.token_available = false - parser.tokens_parsed++ - - if token.token_type == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - - return true -} - -/* - * Set the scanner error and return 0. 
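The flow-collection token stream illustrated in the notes above can be reproduced with a toy scanner. A deliberately simplified sketch (single-level flow sequences only; tokenize and all names are illustrative, not the vendored API) that prints the same token names for the '[item 1, item 2, item 3]' example:

    package main

    import (
        "fmt"
        "strings"
    )

    // tokenize handles just enough YAML to echo the token stream shown
    // in the flow sequence example above.
    func tokenize(input string) {
        fmt.Println("STREAM-START(utf-8)")
        rest := strings.TrimSpace(input)
        if strings.HasPrefix(rest, "[") && strings.HasSuffix(rest, "]") {
            fmt.Println("FLOW-SEQUENCE-START")
            body := rest[1 : len(rest)-1]
            for i, item := range strings.Split(body, ",") {
                if i > 0 {
                    fmt.Println("FLOW-ENTRY")
                }
                fmt.Printf("SCALAR(%q,plain)\n", strings.TrimSpace(item))
            }
            fmt.Println("FLOW-SEQUENCE-END")
        }
        fmt.Println("STREAM-END")
    }

    func main() {
        tokenize("[item 1, item 2, item 3]")
    }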
- */ - -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, - context_mark YAML_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark YAML_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -/* - * Ensure that the tokens queue contains at least one token which can be - * returned to the Parser. - */ - -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - /* Keep fetching tokens until the queue can satisfy the Parser. */ - - for { - /* - * Check if we really need to fetch more tokens. - */ - - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - /* Queue is empty. */ - - need_more_tokens = true - } else { - - /* Check if any potential simple key may occupy the head position. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - if simple_key.possible && - simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - /* We are finished. */ - - if !need_more_tokens { - break - } - - /* Fetch the next token. */ - - if !yaml_parser_fetch_next_token(parser) { - return false - } - - } - - parser.token_available = true - - return true -} - -/* - * The dispatcher for token fetchers. - */ - -func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { - /* Ensure that the buffer is initialized. */ - - if !cache(parser, 1) { - return false - } - - /* If we just started scanning, fetch STREAM-START first. */ - - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - /* Eat whitespaces and comments until we reach the next token. */ - - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - /* Remove obsolete potential simple keys. */ - - if !yaml_parser_stale_simple_keys(parser) { - return false - } - - /* Check the indentation level against the current column. */ - - if !yaml_parser_unroll_indent(parser, parser.mark.column) { - return false - } - - /* - * Ensure that the buffer contains at least 4 characters. 4 is the length - * of the longest indicators ('--- ' and '... '). - */ - - if !cache(parser, 4) { - return false - } - - /* Is it the end of the stream? */ - buf := parser.buffer - pos := parser.buffer_pos - - if is_z(buf[pos]) { - return yaml_parser_fetch_stream_end(parser) - } - - /* Is it a directive? */ - - if parser.mark.column == 0 && buf[pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - /* Is it the document start indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_START_TOKEN) - } - - /* Is it the document end indicator? */ - - if parser.mark.column == 0 && - buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && - is_blankz_at(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, - yaml_DOCUMENT_END_TOKEN) - } - - /* Is it the flow sequence start indicator? 
*/ - - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_SEQUENCE_START_TOKEN) - } - - /* Is it the flow mapping start indicator? */ - - if buf[pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, - yaml_FLOW_MAPPING_START_TOKEN) - } - - /* Is it the flow sequence end indicator? */ - - if buf[pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - /* Is it the flow mapping end indicator? */ - - if buf[pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - /* Is it the flow entry indicator? */ - - if buf[pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - /* Is it the block entry indicator? */ - if buf[pos] == '-' && is_blankz_at(buf, pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - /* Is it the key indicator? */ - if buf[pos] == '?' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_key(parser) - } - - /* Is it the value indicator? */ - if buf[pos] == ':' && - (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) { - return yaml_parser_fetch_value(parser) - } - - /* Is it an alias? */ - if buf[pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - /* Is it an anchor? */ - - if buf[pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - /* Is it a tag? */ - - if buf[pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - /* Is it a literal scalar? */ - if buf[pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - /* Is it a folded scalar? */ - if buf[pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - /* Is it a single-quoted scalar? */ - - if buf[pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - /* Is it a double-quoted scalar? */ - if buf[pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - /* - * Is it a plain scalar? - * - * A plain scalar may start with any non-blank characters except - * - * '-', '?', ':', ',', '[', ']', '{', '}', - * '#', '&', '*', '!', '|', '>', '\'', '\"', - * '%', '@', '`'. - * - * In the block context (and, for the '-' indicator, in the flow context - * too), it may also start with the characters - * - * '-', '?', ':' - * - * if it is followed by a non-space character. - * - * The last rule is more restrictive than the specification requires. - */ - - b := buf[pos] - if !(is_blankz_at(buf, pos) || b == '-' || - b == '?' || b == ':' || - b == ',' || b == '[' || - b == ']' || b == '{' || - b == '}' || b == '#' || - b == '&' || b == '*' || - b == '!' || b == '|' || - b == '>' || b == '\'' || - b == '"' || b == '%' || - b == '@' || b == '`') || - (b == '-' && !is_blank(buf[pos+1])) || - (parser.flow_level == 0 && - (buf[pos] == '?' || buf[pos] == ':') && - !is_blank(buf[pos+1])) { - return yaml_parser_fetch_plain_scalar(parser) - } - - /* - * If we don't determine the token type so far, it is an error. - */ - - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -/* - * Check the list of potential simple keys and remove the positions that - * cannot contain simple keys anymore. - */ - -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - /* Check for a potential simple key for each flow level. 
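The dispatcher above needs at most four bytes of lookahead because '--- ' and '... ' are the longest indicators it matches. The column-0 document marker test in isolation (a sketch; isDocIndicator is an illustrative name):

    package main

    import "fmt"

    // isDocIndicator reports whether buf begins with "---" or "..." at
    // column 0 followed by a blank, a break, or end of input — the same
    // shape as the checks in yaml_parser_fetch_next_token.
    func isDocIndicator(buf []byte, column int) bool {
        if column != 0 || len(buf) < 3 {
            return false
        }
        m := string(buf[:3])
        if m != "---" && m != "..." {
            return false
        }
        if len(buf) == 3 {
            return true // end of input counts as blank
        }
        b := buf[3]
        return b == ' ' || b == '\t' || b == '\n' || b == '\r'
    }

    func main() {
        fmt.Println(isDocIndicator([]byte("--- doc"), 0)) // true
        fmt.Println(isDocIndicator([]byte("---x"), 0))    // false: no blank after
        fmt.Println(isDocIndicator([]byte("..."), 0))     // true
    }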
*/ - - for i := range parser.simple_keys { - /* - * The specification requires that a simple key - * - * - is limited to a single line, - * - is shorter than 1024 characters. - */ - - simple_key := &parser.simple_keys[i] - if simple_key.possible && - (simple_key.mark.line < parser.mark.line || - simple_key.mark.index+1024 < parser.mark.index) { - - /* Check if the potential simple key to be removed is required. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - - simple_key.possible = false - } - } - - return true -} - -/* - * Check if a simple key may start at the current position and add it if - * needed. - */ - -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - /* - * A simple key is required at the current position if the scanner is in - * the block context and the current column coincides with the indentation - * level. - */ - - required := (parser.flow_level == 0 && - parser.indent == parser.mark.column) - - /* - * A simple key is required only when it is the first token in the current - * line. Therefore it is always allowed. But we add a check anyway. - */ - if required && !parser.simple_key_allowed { - panic("impossible") /* Impossible. */ - } - - /* - * If the current position may start a simple key, save it. - */ - - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - } - simple_key.mark = parser.mark - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - } - - return true -} - -/* - * Remove a potential simple key at the current flow level. - */ - -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - if simple_key.possible { - /* If the key is required, it is an error. */ - - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - } - - /* Remove the key from the stack. */ - - simple_key.possible = false - - return true -} - -/* - * Increase the flow level and resize the simple key list if needed. - */ - -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - /* Reset the simple key on the next level. */ - - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* Increase the flow level. */ - - parser.flow_level++ - - return true -} - -/* - * Decrease the flow level. - */ - -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] - } - - return true -} - -/* - * Push the current indentation level to the stack and set the new level - * the current column is greater than the indentation level. In this case, - * append or insert the specified token into the token queue. - * - */ - -func yaml_parser_roll_indent(parser *yaml_parser_t, column int, - number int, token_type yaml_token_type_t, mark YAML_mark_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - if parser.indent == -1 || parser.indent < column { - /* - * Push the current indentation level to the stack and set the new - * indentation level. 
- */ - - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - - /* Create a token and insert it into the queue. */ - token := yaml_token_t{ - token_type: token_type, - start_mark: mark, - end_mark: mark, - } - - // number == -1 -> enqueue otherwise insert - if number > -1 { - number -= parser.tokens_parsed - } - insert_token(parser, number, &token) - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current level - * becomes less or equal to the column. For each indentation level, append - * the BLOCK-END token. - */ - -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* - * column is unsigned and parser->indent is signed, so if - * parser->indent is less than zero the conditional in the while - * loop below is incorrect. Guard against that. - */ - - if parser.indent < 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > column { - /* Create a token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - - } - - return true -} - -/* - * Pop indentation levels from the indents stack until the current - * level resets to -1. For each indentation level, append the - * BLOCK-END token. - */ - -func yaml_parser_reset_indent(parser *yaml_parser_t) bool { - /* In the flow context, do nothing. */ - - if parser.flow_level > 0 { - return true - } - - /* Loop through the indentation levels in the stack. */ - - for parser.indent > -1 { - /* Create a token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - insert_token(parser, -1, &token) - - /* Pop the indentation level. */ - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - - return true -} - -/* - * Initialize the scanner and produce the STREAM-START token. - */ - -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - /* Set the initial indentation. */ - - parser.indent = -1 - - /* Initialize the simple key stack. */ - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - /* A simple key is allowed at the beginning of the stream. */ - - parser.simple_key_allowed = true - - /* We have started. */ - - parser.stream_start_produced = true - - /* Create the STREAM-START token and append it to the queue. */ - token := yaml_token_t{ - token_type: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the STREAM-END token and shut down the scanner. - */ - -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - /* Force new line. */ - - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the STREAM-END token and append it to the queue. 
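The indents stack makes the Python INDENT/DEDENT analogy concrete: unrolling pops levels and emits one BLOCK-END per pop. A standalone sketch of that loop (illustrative names; token emission reduced to printing):

    package main

    import "fmt"

    // unroll mirrors yaml_parser_unroll_indent: pop every indentation
    // level greater than column and emit a BLOCK-END for each.
    func unroll(indents []int, indent, column int) ([]int, int) {
        for indent > column {
            fmt.Println("BLOCK-END")
            indent = indents[len(indents)-1]
            indents = indents[:len(indents)-1]
        }
        return indents, indent
    }

    func main() {
        // Two nested block collections opened at columns 2 and 4;
        // a token back at column 0 closes both.
        indents := []int{-1, 0, 2}
        indents, indent := unroll(indents, 4, 0)
        fmt.Println("indent:", indent, "stack:", indents) // indent: 0 stack: [-1]
    }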
*/ - token := yaml_token_t{ - token_type: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. - */ - -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. */ - var token yaml_token_t - if !yaml_parser_scan_directive(parser, &token) { - return false - } - - /* Append the token to the queue. */ - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the DOCUMENT-START or DOCUMENT-END token. - */ - -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset the indentation level. */ - - if !yaml_parser_reset_indent(parser) { - return false - } - - /* Reset simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - /* Create the DOCUMENT-START or DOCUMENT-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. - */ - -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* The indicators '[' and '{' may start a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* Increase the flow level. */ - - if !yaml_parser_increase_flow_level(parser) { - return false - } - - /* A simple key may follow the indicators '[' and '{'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. - */ - -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, - token_type yaml_token_type_t) bool { - - /* Reset any potential simple key on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Decrease the flow level. */ - - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - /* No simple keys after the indicators ']' and '}'. */ - - parser.simple_key_allowed = false - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. */ - - token := yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - } - - /* Append the token to the queue. */ - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the FLOW-ENTRY token. - */ - -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - - /* Reset any potential simple keys on the current flow level. 
*/ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after ','. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the FLOW-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the BLOCK-ENTRY token. - */ - -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - - /* Check if the scanner is in the block context. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new entry. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - - /* Add the BLOCK-SEQUENCE-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - /* - * It is an error for the '-' indicator to occur in the flow context, - * but we let the Parser detect and report about it because the Parser - * is able to point to the context. - */ - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '-'. */ - - parser.simple_key_allowed = true - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the BLOCK-ENTRY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the KEY token. - */ - -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - /* In the block context, additional checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a new key (not nessesary simple). */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Reset any potential simple keys on the current flow level. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* Simple keys are allowed after '?' in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the KEY token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the VALUE token. - */ - -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - /* Have we found a simple key? */ - - if simple_key.possible { - - /* Create the KEY token and insert it into the queue. 
*/ - - token := yaml_token_t{ - token_type: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - - insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - /* In the block context, we may need to add the BLOCK-MAPPING-START token. */ - - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - /* Remove the simple key. */ - - simple_key.possible = false - - /* A simple key cannot follow another simple key. */ - - parser.simple_key_allowed = false - } else { - /* The ':' indicator follows a complex key. */ - - /* In the block context, extra checks are required. */ - - if parser.flow_level == 0 { - /* Check if we are allowed to start a complex value. */ - - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - /* Add the BLOCK-MAPPING-START token if needed. */ - - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, - yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - /* Simple keys after ':' are allowed in the block context. */ - - parser.simple_key_allowed = (parser.flow_level == 0) - } - - /* Consume the token. */ - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - /* Create the VALUE token and append it to the queue. */ - - token := yaml_token_t{ - token_type: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the ALIAS or ANCHOR token. - */ - -func yaml_parser_fetch_anchor(parser *yaml_parser_t, token_type yaml_token_type_t) bool { - - /* An anchor or an alias could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow an anchor or an alias. */ - - parser.simple_key_allowed = false - - /* Create the ALIAS or ANCHOR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, token_type) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the TAG token. - */ - -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - /* A tag could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a tag. */ - - parser.simple_key_allowed = false - - /* Create the TAG token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. - */ - -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - /* Remove any potential simple keys. */ - - if !yaml_parser_remove_simple_key(parser) { - return false - } - - /* A simple key may follow a block scalar. */ - - parser.simple_key_allowed = true - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. - */ - -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - - /* A plain scalar could be a simple key. 
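This retroactive insert is the payoff of the simple-key bookkeeping: the scalar was already queued when ':' arrives, so KEY (and, in the block context, BLOCK-MAPPING-START) must be spliced in front of it. The queue mechanics in miniature (a sketch using strings in place of yaml_token_t):

    package main

    import "fmt"

    func main() {
        // "a: 1" — SCALAR("a") is queued before we know it is a key.
        queue := []string{"STREAM-START", `SCALAR("a",plain)`}
        savedPos := 1 // recorded by save_simple_key when "a" was read

        // ':' found: splice BLOCK-MAPPING-START and KEY at the saved slot.
        spliced := []string{"BLOCK-MAPPING-START", "KEY"}
        queue = append(queue[:savedPos], append(spliced, queue[savedPos:]...)...)

        queue = append(queue, "VALUE", `SCALAR("1",plain)`, "BLOCK-END", "STREAM-END")
        for _, t := range queue {
            fmt.Println(t)
        }
    }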
*/ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Produce the SCALAR(...,plain) token. - */ - -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - /* A plain scalar could be a simple key. */ - - if !yaml_parser_save_simple_key(parser) { - return false - } - - /* A simple key cannot follow a flow scalar. */ - - parser.simple_key_allowed = false - - /* Create the SCALAR token and append it to the queue. */ - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - - insert_token(parser, -1, &token) - - return true -} - -/* - * Eat whitespaces and comments until the next token is found. - */ - -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - /* Until the next token is not found. */ - - for { - /* Allow the BOM mark to start a line. */ - - if !cache(parser, 1) { - return false - } - - if parser.mark.column == 0 && is_bom_at(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - /* - * Eat whitespaces. - * - * Tabs are allowed: - * - * - in the flow context; - * - in the block context, but not at the beginning of the line or - * after '-', '?', or ':' (complex value). - */ - - if !cache(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || - ((parser.flow_level > 0 || !parser.simple_key_allowed) && - parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Eat a comment until a line break. */ - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* If it is a line break, eat it. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - - /* In the block context, a new line may start a simple key. */ - - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - /* We have found a token. */ - - break - } - } - - return true -} - -/* - * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - /* Eat '%'. */ - - start_mark := parser.mark - - skip(parser) - - /* Scan the directive name. */ - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - /* Is it a YAML directive? */ - var major, minor int - if bytes.Equal(name, []byte("YAML")) { - /* Scan the VERSION directive value. */ - - if !yaml_parser_scan_version_directive_value(parser, start_mark, - &major, &minor) { - return false - } - - end_mark := parser.mark - - /* Create a VERSION-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - } else if bytes.Equal(name, []byte("TAG")) { - /* Is it a TAG directive? */ - /* Scan the TAG directive value. 
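The loop above encodes the whitespace rules compactly: tabs are skipped only where a simple key cannot start, and comments always run to the line break. The same policy for a single line, as a standalone sketch (skipToToken is illustrative):

    package main

    import "fmt"

    // skipToToken eats leading spaces (and tabs when allowed), then an
    // optional '#' comment that runs to the end of the line.
    func skipToToken(line string, tabsAllowed bool) string {
        i := 0
        for i < len(line) && (line[i] == ' ' || (tabsAllowed && line[i] == '\t')) {
            i++
        }
        if i < len(line) && line[i] == '#' {
            return "" // the rest of the line is a comment
        }
        return line[i:]
    }

    func main() {
        fmt.Printf("%q\n", skipToToken("   # only a comment", true)) // ""
        fmt.Printf("%q\n", skipToToken("\t  key: value", true))      // "key: value"
    }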
*/ - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, - &handle, &prefix) { - return false - } - - end_mark := parser.mark - - /* Create a TAG-DIRECTIVE token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - } else { - /* Unknown directive. */ - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - /* Eat the rest of the line including any comments. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -/* - * Scan the directive name. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^ - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^ - */ - -func yaml_parser_scan_directive_name(parser *yaml_parser_t, - start_mark YAML_mark_t, name *[]byte) bool { - - /* Consume the directive name. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the name is empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - /* Check for a blank character after the name. */ - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - - *name = s - - return true -} - -/* - * Scan the value of VERSION-DIRECTIVE. - * - * Scope: - * %YAML 1.1 # a comment \n - * ^^^^^^ - */ - -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, major *int, minor *int) bool { - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Consume the major version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - /* Eat '.'. */ - - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - /* Consume the minor version number. */ - - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - - return true -} - -const MAX_NUMBER_LENGTH = 9 - -/* - * Scan the version number of VERSION-DIRECTIVE. 
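The helper implemented next accumulates the value while enforcing the MAX_NUMBER_LENGTH cap declared above. The same parse as a standalone sketch (scanNumber is an illustrative name):

    package main

    import "fmt"

    const maxNumberLength = 9 // same cap as MAX_NUMBER_LENGTH

    // scanNumber consumes leading digits, accumulating the value and
    // rejecting runs longer than the cap, then returns the remainder.
    func scanNumber(s string) (int, string, error) {
        value, length := 0, 0
        for len(s) > 0 && s[0] >= '0' && s[0] <= '9' {
            length++
            if length > maxNumberLength {
                return 0, s, fmt.Errorf("found extremely long version number")
            }
            value = value*10 + int(s[0]-'0')
            s = s[1:]
        }
        if length == 0 {
            return 0, s, fmt.Errorf("did not find expected version number")
        }
        return value, s, nil
    }

    func main() {
        major, rest, _ := scanNumber("1.1")
        minor, _, _ := scanNumber(rest[1:]) // rest is ".1"; skip the '.'
        fmt.Println(major, minor)           // 1 1
    }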
- * - * Scope: - * %YAML 1.1 # a comment \n - * ^ - * %YAML 1.1 # a comment \n - * ^ - */ - -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, - start_mark YAML_mark_t, number *int) bool { - - /* Repeat while the next character is digit. */ - - if !cache(parser, 1) { - return false - } - - value := 0 - length := 0 - for is_digit(parser.buffer[parser.buffer_pos]) { - /* Check if the number is too long. */ - - length++ - if length > MAX_NUMBER_LENGTH { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - - value = value*10 + as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - } - - /* Check if the number was present. */ - - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - - *number = value - - return true -} - -/* - * Scan the value of a TAG-DIRECTIVE token. - * - * Scope: - * %TAG !yaml! tag:yaml.org,2002: \n - * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - */ - -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, - start_mark YAML_mark_t, handle, prefix *[]byte) bool { - - /* Eat whitespaces. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a handle. */ - var handle_value []byte - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - /* Expect a whitespace. */ - - if !cache(parser, 1) { - return false - } - - if !is_blank(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - /* Eat whitespaces. */ - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - /* Scan a prefix. */ - var prefix_value []byte - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - /* Expect a whitespace or line break. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, - token_type yaml_token_type_t) bool { - - /* Eat the indicator character. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the value. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - b := parser.buffer[parser.buffer_pos] - if len(s) == 0 || !(is_blankz_at(parser.buffer, parser.buffer_pos) || b == '?' 
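The (handle, prefix) pair scanned by yaml_parser_scan_tag_directive_value above is consumed later, when node tags are resolved against the active %TAG directives. A hedged sketch of that lookup (resolveTag and the map layout are illustrative, not part of this file):

func resolveTag(handle, suffix string, directives map[string]string) (string, bool) {
    prefix, ok := directives[handle]
    if !ok {
        return "", false // no %TAG directive declared this handle
    }
    return prefix + suffix, true
}

After the directive %TAG !e! tag:example.com,2000:app/ the shorthand !e!foo resolves to tag:example.com,2000:app/foo.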
|| - b == ':' || b == ',' || - b == ']' || b == '}' || - b == '%' || b == '@' || - b == '`') { - context := "while scanning an anchor" - if token_type != yaml_ANCHOR_TOKEN { - context = "while scanning an alias" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - /* Create a token. */ - *token = yaml_token_t{ - token_type: token_type, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - start_mark := parser.mark - - /* Check if the tag is in the canonical form. */ - - if !cache(parser, 2) { - return false - } - - var handle []byte - var suffix []byte - if parser.buffer[parser.buffer_pos+1] == '<' { - /* Set the handle to '' */ - - /* Eat '!<' */ - - skip(parser) - skip(parser) - - /* Consume the tag value. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - /* Check for '>' and eat it. */ - - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else if is_blank(parser.buffer[parser.buffer_pos+1]) { - // NON-SPECIFIED - skip(parser) - } else { - /* The tag has either the '!suffix' or the '!handle!suffix' form. */ - - /* First, try to scan a handle. */ - - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - /* Check if it is, indeed, handle. */ - - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - /* Scan the suffix now. */ - - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - /* It wasn't a handle after all. Scan the rest of the tag. */ - - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - /* Set the handle to '!'. */ - - handle = []byte{'!'} - - /* - * A special case: the '!' tag. Set the handle to '' and the - * suffix to '!'. - */ - - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - - } - } - - /* Check the character which ends the tag. */ - - if !cache(parser, 1) { - return false - } - - if !is_blankz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - - return true -} - -/* - * Scan a tag handle. - */ - -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, handle *[]byte) bool { - - /* Check the initial '!' character. */ - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - /* Copy the '!' character. */ - var s []byte - s = read(parser, s) - - /* Copy all subsequent alphabetical and numerical characters. */ - - if !cache(parser, 1) { - return false - } - - for is_alpha(parser.buffer[parser.buffer_pos]) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Check if the trailing character is '!' and copy it. 
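yaml_parser_scan_tag above distinguishes a verbatim '!<...>' tag, a '!handle!suffix' shorthand, a plain '!suffix' local tag, and the lone '!'. A compact restatement (splitTag is hypothetical, assumes its input starts with '!', and skips the URI validation the real scanner performs):

func splitTag(t string) (handle, suffix string) {
    if len(t) > 2 && t[1] == '<' && t[len(t)-1] == '>' {
        return "", t[2 : len(t)-1] // verbatim form: !<tag:yaml.org,2002:str>
    }
    if t == "!" {
        return "", "!" // the special '!' tag: empty handle, suffix "!"
    }
    for i := 1; i < len(t); i++ {
        if t[i] == '!' {
            return t[:i+1], t[i+1:] // named or secondary handle: !e!foo, !!str
        }
    }
    return "!", t[1:] // plain local tag: !foo
}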
*/ - - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - /* - * It's either the '!' tag or not really a tag handle. If it's a %TAG - * directive, it's an error. If it's a tag token, it must be a part of - * URI. - */ - - if directive && !(s[0] == '!' && len(s) == 1) { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - - return true -} - -/* - * Scan a tag. - */ - -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, - head []byte, start_mark YAML_mark_t, uri *[]byte) bool { - - var s []byte - /* - * Copy the head if needed. - * - * Note that we don't copy the leading '!' character. - */ - if len(head) > 1 { - s = append(s, head[1:]...) - } - - /* Scan the tag. */ - if !cache(parser, 1) { - return false - } - - /* - * The set of characters that may appear in URI is as follows: - * - * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - * '%'. - */ - - b := parser.buffer[parser.buffer_pos] - for is_alpha(b) || b == ';' || - b == '/' || b == '?' || - b == ':' || b == '@' || - b == '&' || b == '=' || - b == '+' || b == '$' || - b == ',' || b == '.' || - b == '!' || b == '~' || - b == '*' || b == '\'' || - b == '(' || b == ')' || - b == '[' || b == ']' || - b == '%' { - /* Check if it is a URI-escape sequence. */ - - if b == '%' { - if !yaml_parser_scan_uri_escapes(parser, - directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - - if !cache(parser, 1) { - return false - } - b = parser.buffer[parser.buffer_pos] - } - - /* Check if the tag is non-empty. */ - - if len(s) == 0 { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - - *uri = s - - return true -} - -/* - * Decode an URI-escape sequence corresponding to a single UTF-8 character. - */ - -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, - start_mark YAML_mark_t, s *[]byte) bool { - - /* Decode the required number of characters. */ - w := 10 - for w > 0 { - - /* Check for a URI-escaped octet. */ - - if !cache(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer[parser.buffer_pos+1]) && - is_hex(parser.buffer[parser.buffer_pos+2])) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - /* Get the octet. */ - octet := byte((as_hex(parser.buffer[parser.buffer_pos+1]) << 4) + - as_hex(parser.buffer[parser.buffer_pos+2])) - - /* If it is the leading octet, determine the length of the UTF-8 sequence. */ - - if w == 10 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - /* Check if the trailing octet is correct. */ - - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - /* Copy the octet and move the pointers. */ - - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - - return true -} - -/* - * Scan a block scalar. - */ - -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, - literal bool) bool { - - /* Eat the indicator '|' or '>'. 
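yaml_parser_scan_uri_escapes above decodes runs of %XX octets and validates them as one UTF-8 sequence. Just the percent-decoding step, as a self-contained sketch (decodeURIEscapes is a hypothetical name; the real code also checks the UTF-8 lead and trailing octets as it goes):

func decodeURIEscapes(s string) ([]byte, bool) {
    hex := func(c byte) (int, bool) {
        switch {
        case c >= '0' && c <= '9':
            return int(c - '0'), true
        case c >= 'a' && c <= 'f':
            return int(c-'a') + 10, true
        case c >= 'A' && c <= 'F':
            return int(c-'A') + 10, true
        }
        return 0, false
    }
    var out []byte
    for i := 0; i < len(s); {
        if s[i] != '%' {
            out = append(out, s[i])
            i++
            continue
        }
        if i+2 >= len(s) {
            return nil, false // truncated escape
        }
        hi, ok1 := hex(s[i+1])
        lo, ok2 := hex(s[i+2])
        if !ok1 || !ok2 {
            return nil, false // "did not find URI escaped octet"
        }
        out = append(out, byte(hi<<4|lo))
        i += 3
    }
    return out, true
}

decodeURIEscapes("tag:%C3%A9") yields the UTF-8 bytes of "tag:é".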
*/ - - start_mark := parser.mark - - skip(parser) - - /* Scan the additional block scalar indicators. */ - - if !cache(parser, 1) { - return false - } - - /* Check for a chomping indicator. */ - chomping := 0 - increment := 0 - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - /* Set the chomping method and eat the indicator. */ - - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - - /* Check for an indentation indicator. */ - - if !cache(parser, 1) { - return false - } - - if is_digit(parser.buffer[parser.buffer_pos]) { - /* Check that the indentation is greater than 0. */ - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - /* Get the indentation level and eat the indicator. */ - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - } - } else if is_digit(parser.buffer[parser.buffer_pos]) { - - /* Do the same as above, but in the opposite order. */ - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - increment = as_digit(parser.buffer[parser.buffer_pos]) - - skip(parser) - - if !cache(parser, 1) { - return false - } - - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - - skip(parser) - } - } - - /* Eat whitespaces and comments to the end of the line. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - } - - /* Check if we are at the end of the line. */ - - if !is_breakz_at(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - /* Eat a line break. */ - - if is_break_at(parser.buffer, parser.buffer_pos) { - if !cache(parser, 2) { - return false - } - - skip_line(parser) - } - - end_mark := parser.mark - - /* Set the indentation level if it was specified. */ - indent := 0 - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - /* Scan the leading line breaks and determine the indentation level if needed. */ - var trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, - start_mark, &end_mark) { - return false - } - - /* Scan the block scalar content. */ - - if !cache(parser, 1) { - return false - } - - var s []byte - var leading_break []byte - leading_blank := false - trailing_blank := false - for parser.mark.column == indent && !is_z(parser.buffer[parser.buffer_pos]) { - - /* - * We are at the beginning of a non-empty line. - */ - - /* Is it a trailing whitespace? */ - - trailing_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Check if we need to fold the leading line break. 
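The two branches above accept the indicators that may follow '|' or '>' in either order: an optional '+' or '-' chomping indicator and an optional single non-zero indentation digit. Restated over a plain string (parseBlockScalarHeader is a hypothetical name):

func parseBlockScalarHeader(h string) (chomping, increment int, ok bool) {
    for i := 0; i < len(h); i++ {
        switch c := h[i]; {
        case c == '+' && chomping == 0:
            chomping = +1 // keep trailing line breaks
        case c == '-' && chomping == 0:
            chomping = -1 // strip trailing line breaks
        case c >= '1' && c <= '9' && increment == 0:
            increment = int(c - '0') // explicit indentation indicator
        default:
            // covers '0' ("found an indentation indicator equal to 0")
            // and any repeated indicator
            return 0, 0, false
        }
    }
    return chomping, increment, true
}

parseBlockScalarHeader("+2") returns (+1, 2, true); an empty header leaves both at their clip/auto-detect defaults.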
*/ - - if !literal && len(leading_break) > 0 && leading_break[0] == '\n' && - !leading_blank && !trailing_blank { - /* Do we need to join the lines by space? */ - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - leading_break = leading_break[:0] - } - - /* Append the remaining line breaks. */ - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - /* Is it a leading whitespace? */ - - leading_blank = is_blank(parser.buffer[parser.buffer_pos]) - - /* Consume the current line. */ - - for !is_breakz_at(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if !cache(parser, 1) { - return false - } - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - /* Eat the following indentation spaces and line breaks. */ - - if !yaml_parser_scan_block_scalar_breaks(parser, - &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - /* Chomp the tail. */ - - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - - return true -} - -/* - * Scan indentation spaces and line breaks for a block scalar. Determine the - * indentation level if needed. - */ - -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, - indent *int, breaks *[]byte, - start_mark YAML_mark_t, end_mark *YAML_mark_t) bool { - - *end_mark = parser.mark - - /* Eat the indentation spaces and line breaks. */ - max_indent := 0 - for { - /* Eat the indentation spaces. */ - - if !cache(parser, 1) { - return false - } - - for (*indent == 0 || parser.mark.column < *indent) && - is_space(parser.buffer[parser.buffer_pos]) { - skip(parser) - if !cache(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - /* Check for a tab character messing the indentation. */ - - if (*indent == 0 || parser.mark.column < *indent) && - is_tab(parser.buffer[parser.buffer_pos]) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - /* Have we found a non-empty line? */ - - if !is_break_at(parser.buffer, parser.buffer_pos) { - break - } - - /* Consume the line break. */ - - if !cache(parser, 2) { - return false - } - - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - /* Determine the indentation level if needed. */ - - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - - return true -} - -/* - * Scan a quoted scalar. - */ - -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, - single bool) bool { - - /* Eat the left quote. */ - - start_mark := parser.mark - - skip(parser) - - /* Consume the content of the quoted scalar. */ - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - for { - /* Check that there are no document indicators at the beginning of the line. 
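For folded ('>') scalars the loop above joins adjacent non-blank lines with a space, while extra line breaks survive as hard newlines; chomping then decides the fate of trailing breaks. The folding rule in isolation (foldLines is hypothetical, assumes at least one line, and ignores the leading/trailing-blank and indentation subtleties the scanner handles):

func foldLines(lines []string) string {
    s := lines[0]
    breaks := 0
    for _, line := range lines[1:] {
        if line == "" {
            breaks++ // an empty line is an extra break
            continue
        }
        if breaks == 0 {
            s += " " // a single break folds into a space
        }
        for i := 0; i < breaks; i++ {
            s += "\n" // n+1 breaks become n newlines
        }
        s += line
        breaks = 0
    }
    return s
}

foldLines([]string{"a", "b"}) is "a b"; foldLines([]string{"a", "", "b"}) is "a\nb".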
*/ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - /* Check for EOF. */ - - if is_z(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - /* Consume non-blank characters. */ - - if !cache(parser, 2) { - return false - } - - leading_blanks := false - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for an escaped single quote. */ - - if single && parser.buffer[parser.buffer_pos] == '\'' && - parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - /* Check for the right quote. */ - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && - is_break_at(parser.buffer, parser.buffer_pos+1) { - - /* Check for an escaped line break. */ - if !cache(parser, 3) { - return false - } - - skip(parser) - skip_line(parser) - leading_blanks = true - break - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - - /* Check for an escape sequence. */ - - code_length := 0 - - /* Check the escape character. */ - - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '/': - s = append(s, '/') - case '\\': - s = append(s, '\\') - case 'N': /* NEL (#x85) */ - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': /* #xA0 */ - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': /* LS (#x2028) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': /* PS (#x2029) */ - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - /* Consume an arbitrary escape code. */ - - if code_length > 0 { - value := 0 - - /* Scan the character value. */ - - if !cache(parser, code_length) { - return false - } - - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer[parser.buffer_pos+k]) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer[parser.buffer_pos+k]) - } - - /* Check the value and write the character. 
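The escape switch above handles the fixed escapes inline and defers \x, \u and \U to a loop over 2, 4 or 8 hex digits. That loop as a stand-alone sketch (decodeHexEscape is a hypothetical helper; the validity checks on the resulting code point follow in the next lines of the scanner):

func decodeHexEscape(kind byte, digits string) (rune, bool) {
    var n int
    switch kind {
    case 'x':
        n = 2
    case 'u':
        n = 4
    case 'U':
        n = 8
    default:
        return 0, false
    }
    if len(digits) < n {
        return 0, false
    }
    value := 0
    for i := 0; i < n; i++ {
        switch c := digits[i]; {
        case c >= '0' && c <= '9':
            value = value<<4 + int(c-'0')
        case c >= 'a' && c <= 'f':
            value = value<<4 + int(c-'a') + 10
        case c >= 'A' && c <= 'F':
            value = value<<4 + int(c-'A') + 10
        default:
            return 0, false // not a hex digit
        }
    }
    return rune(value), true
}

decodeHexEscape('u', "00E9") returns U+00E9 ('é').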
*/ - - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - /* Advance the pointer. */ - - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - /* It is a non-escaped non-blank character. */ - - s = read(parser, s) - } - - if !cache(parser, 2) { - return false - } - } - - /* Check if we are at the end of the scalar. */ - b := parser.buffer[parser.buffer_pos] - if single { - if b == '\'' { - break - } - } else if b == '"' { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || is_break_at(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Consume a space or a tab character. */ - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - - if !cache(parser, 1) { - return false - } - } - - /* Join the whitespaces or fold line breaks. */ - - if leading_blanks { - /* Do we need to fold line breaks? */ - - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Eat the right quote. */ - - skip(parser) - - end_mark := parser.mark - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - return true -} - -/* - * Scan a plain scalar. - */ - -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - var s []byte - var leading_break []byte - var trailing_breaks []byte - var whitespaces []byte - leading_blanks := false - indent := parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - /* Consume the content of the plain scalar. */ - - for { - /* Check for a document indicator. */ - - if !cache(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos] == '.' 
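The hand-rolled one-to-four-byte encoder above produces the same bytes as the standard library for every code point it accepts. A small comparison program, for reference only:

package main

import (
    "fmt"
    "unicode/utf8"
)

func main() {
    for _, r := range []rune{0x7F, 0xE9, 0xFFFF, 0x10348} {
        buf := make([]byte, utf8.UTFMax)
        n := utf8.EncodeRune(buf, r)
        fmt.Printf("U+%04X -> % X\n", r, buf[:n])
        // U+007F -> 7F, U+00E9 -> C3 A9,
        // U+FFFF -> EF BF BF, U+10348 -> F0 90 8D 88
    }
}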
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz_at(parser.buffer, parser.buffer_pos+3) { - break - } - - /* Check for a comment. */ - - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - /* Consume non-blank characters. */ - - for !is_blankz_at(parser.buffer, parser.buffer_pos) { - /* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */ - - if parser.flow_level > 0 && - parser.buffer[parser.buffer_pos] == ':' && - !is_blankz_at(parser.buffer, parser.buffer_pos+1) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found unexpected ':'") - return false - } - - /* Check for indicators that may end a plain scalar. */ - b := parser.buffer[parser.buffer_pos] - if (b == ':' && is_blankz_at(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (b == ',' || b == ':' || - b == '?' || b == '[' || - b == ']' || b == '{' || - b == '}')) { - break - } - - /* Check if we need to join whitespaces and breaks. */ - - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - /* Do we need to fold line breaks? */ - - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - } - leading_break = leading_break[:0] - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - leading_break = leading_break[:0] - trailing_breaks = trailing_breaks[:0] - } - - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - /* Copy the character. */ - - s = read(parser, s) - end_mark = parser.mark - - if !cache(parser, 2) { - return false - } - } - - /* Is it the end? */ - - if !(is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos)) { - break - } - - /* Consume blank characters. */ - - if !cache(parser, 1) { - return false - } - - for is_blank(parser.buffer[parser.buffer_pos]) || - is_break_at(parser.buffer, parser.buffer_pos) { - - if is_blank(parser.buffer[parser.buffer_pos]) { - /* Check for tab character that abuse indentation. */ - - if leading_blanks && parser.mark.column < indent && - is_tab(parser.buffer[parser.buffer_pos]) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") - return false - } - - /* Consume a space or a tab character. */ - - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if !cache(parser, 2) { - return false - } - - /* Check if it is a first line break. */ - - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if !cache(parser, 1) { - return false - } - } - - /* Check indentation level. */ - - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - /* Create a token. */ - - *token = yaml_token_t{ - token_type: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - /* Note that we change the 'simple_key_allowed' flag. 
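The inner loop above ends a plain scalar at ':' followed by a blank or break, and, inside flow collections, at any flow indicator. As a simplified predicate (endsPlainScalar is hypothetical; next stands in for the scanner's full blank-or-break check on the following byte):

func endsPlainScalar(b, next byte, flowLevel int) bool {
    if b == ':' && (next == ' ' || next == '\t' || next == '\n' || next == 0) {
        return true // "key: value": the ':' is structure, not content
    }
    if flowLevel > 0 {
        switch b {
        case ',', ':', '?', '[', ']', '{', '}':
            return true // flow indicators end a plain scalar in flow context
        }
    }
    return false
}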
*/ - - if leading_blanks { - parser.simple_key_allowed = true - } - - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go deleted file mode 100644 index f153aee46..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go +++ /dev/null @@ -1,360 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "reflect" - "sort" - "strings" - "sync" - "unicode" -) - -// A field represents a single field found in a struct. -type field struct { - name string - tag bool - index []int - typ reflect.Type - omitEmpty bool - flow bool -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("yaml") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
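For orientation, the field scan above follows the familiar encoding/json conventions, keyed on "yaml". A hypothetical struct touching each case the code handles:

type Example struct {
    Name    string `yaml:"name"`            // renamed via the tag
    Count   int    `yaml:"count,omitempty"` // option found by opts.Contains
    Ports   []int  `yaml:"ports,flow"`      // candiedyaml's flow-style option
    Skipped string `yaml:"-"`               // tag "-": excluded entirely
    hidden  string // unexported (PkgPath != ""): ignored
}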
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft, - opts.Contains("omitempty"), opts.Contains("flow")}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, field{name: ft.Name(), index: index, typ: ft}) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for _, i := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - } - v = v.Field(i) - } - return v -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, i := range index { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - t = t.Field(i).Type - } - return t -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { - av, ak := getElem(sv[i]) - bv, bk := getElem(sv[j]) - if ak == reflect.String && bk == reflect.String { - return av.String() < bv.String() - } - - return ak < bk -} - -func getElem(v reflect.Value) (reflect.Value, reflect.Kind) { - k := v.Kind() - for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() { - v = v.Elem() - k = v.Kind() - } - - return v, k -} - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go deleted file mode 100644 index a76b63363..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -/* - * Set the writer error and return 0. 
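Concretely, the tag helpers above behave like this (a usage sketch; the values follow directly from the code):

    name, opts := parseTag("count,omitempty,flow")
    // name == "count"
    // opts.Contains("omitempty") == true
    // opts.Contains("flow")      == true
    // opts.Contains("empty")     == false (whole options only, never substrings)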
- */ - -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - - return false -} - -/* - * Flush the output buffer. - */ - -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("Write handler must be set") /* Write handler must be set. */ - } - if emitter.encoding == yaml_ANY_ENCODING { - panic("Encoding must be set") /* Output encoding must be set. */ - } - - /* Check if the buffer is empty. */ - - if emitter.buffer_pos == 0 { - return true - } - - /* If the output encoding is UTF-8, we don't need to recode the buffer. */ - - if emitter.encoding == yaml_UTF8_ENCODING { - if err := emitter.write_handler(emitter, - emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true - } - - /* Recode the buffer into the raw buffer. */ - - var low, high int - if emitter.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - high, low = 1, 0 - } - - pos := 0 - for pos < emitter.buffer_pos { - - /* - * See the "reader.c" code for more details on UTF-8 encoding. Note - * that we assume that the buffer contains a valid UTF-8 sequence. - */ - - /* Read the next UTF-8 character. */ - - octet := emitter.buffer[pos] - - var w int - var value rune - switch { - case octet&0x80 == 0x00: - w, value = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, value = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, value = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, value = 4, rune(octet&0x07) - } - - for k := 1; k < w; k++ { - octet = emitter.buffer[pos+k] - value = (value << 6) + (rune(octet) & 0x3F) - } - - pos += w - - /* Write the character. */ - - if value < 0x10000 { - var b [2]byte - b[high] = byte(value >> 8) - b[low] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1]) - } else { - /* Write the character using a surrogate pair (check "reader.c"). */ - - var b [4]byte - value -= 0x10000 - b[high] = byte(0xD8 + (value >> 18)) - b[low] = byte((value >> 10) & 0xFF) - b[high+2] = byte(0xDC + ((value >> 8) & 0xFF)) - b[low+2] = byte(value & 0xFF) - emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3]) - } - } - - /* Write the raw buffer. */ - - // Write the raw buffer. - if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - - emitter.buffer_pos = 0 - emitter.raw_buffer = emitter.raw_buffer[:0] - return true -} diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go deleted file mode 100644 index de4c05ad8..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
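The surrogate-pair arithmetic in yaml_emitter_flush above agrees with the standard library. For reference (an illustrative program, not part of the patch):

package main

import (
    "fmt"
    "unicode/utf16"
)

func main() {
    hi, lo := utf16.EncodeRune(0x10348)          // beyond the BMP: two 16-bit units
    fmt.Printf("U+10348 -> %04X %04X\n", hi, lo) // D800 DF48
}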
-*/ - -package candiedyaml - -const ( - yaml_VERSION_MAJOR = 0 - yaml_VERSION_MINOR = 1 - yaml_VERSION_PATCH = 6 - yaml_VERSION_STRING = "0.1.6" -) diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go deleted file mode 100644 index 2b3b7d749..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go +++ /dev/null @@ -1,891 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -const ( - INPUT_RAW_BUFFER_SIZE = 1024 - - /* - * The size of the input buffer. - * - * It should be possible to decode the whole raw buffer. - */ - INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3) - - /* - * The size of the output buffer. - */ - - OUTPUT_BUFFER_SIZE = 512 - - /* - * The size of the output raw buffer. - * - * It should be possible to encode the whole output buffer. - */ - - OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2) - - INITIAL_STACK_SIZE = 16 - INITIAL_QUEUE_SIZE = 16 -) - -func width(b byte) int { - if b&0x80 == 0 { - return 1 - } - - if b&0xE0 == 0xC0 { - return 2 - } - - if b&0xF0 == 0xE0 { - return 3 - } - - if b&0xF8 == 0xF0 { - return 4 - } - - return 0 -} - -func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) { - w := width(src[*src_pos]) - switch w { - case 4: - dest[*dest_pos+3] = src[*src_pos+3] - fallthrough - case 3: - dest[*dest_pos+2] = src[*src_pos+2] - fallthrough - case 2: - dest[*dest_pos+1] = src[*src_pos+1] - fallthrough - case 1: - dest[*dest_pos] = src[*src_pos] - default: - panic("invalid width") - } - *dest_pos += w - *src_pos += w -} - -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ - -func is_alpha(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'Z') || - (b >= 'a' && b <= 'z') || - b == '_' || b == '-' -} - -// /* -// * Check if the character at the specified position is a digit. -// */ -// -func is_digit(b byte) bool { - return b >= '0' && b <= '9' -} - -// /* -// * Get the value of a digit. -// */ -// -func as_digit(b byte) int { - return int(b) - '0' -} - -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -func is_hex(b byte) bool { - return (b >= '0' && b <= '9') || - (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - -// -// /* -// * Get the value of a hex-digit. -// */ -// -func as_hex(b byte) int { - if b >= 'A' && b <= 'F' { - return int(b) - 'A' + 10 - } else if b >= 'a' && b <= 'f' { - return int(b) - 'a' + 10 - } - return int(b) - '0' -} - -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? 
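A quick reference for width() above; the lead byte alone determines the sequence length:

    0xxxxxxx -> 1 byte (ASCII)
    110xxxxx -> 2 bytes
    1110xxxx -> 3 bytes
    11110xxx -> 4 bytes
    10xxxxxx -> 0 (a continuation byte is invalid as a lead byte)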
\ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) - -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -func is_blankz_at(b []byte, i int) bool { - return is_blank(b[i]) || is_breakz_at(b, i) -} - -// /* -// * Check if the character at the specified position is a line break. -// */ -func is_break_at(b []byte, i int) bool { - return b[i] == '\r' || /* CR (#xD)*/ - b[i] == '\n' || /* LF (#xA) */ - (b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */ - (b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */ -} - -func is_breakz_at(b []byte, i int) bool { - return is_break_at(b, i) || is_z(b[i]) -} - -func is_crlf_at(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// /* -// * Check if the character at the specified position is NUL. -// */ -func is_z(b byte) bool { - return b == 0x0 -} - -// /* -// * Check if the character at the specified position is space. -// */ -func is_space(b byte) bool { - return b == ' ' -} - -// -// /* -// * Check if the character at the specified position is tab. -// */ -func is_tab(b byte) bool { - return b == '\t' -} - -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -func is_blank(b byte) bool { - return is_space(b) || is_tab(b) -} - -// /* -// * Check if the character is ASCII. -// */ -func is_ascii(b byte) bool { - return b <= '\x7f' -} - -// /* -// * Check if the character can be printed unescaped. -// */ -func is_printable_at(b []byte, i int) bool { - return ((b[i] == 0x0A) || /* . == #x0A */ - (b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */ - (b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */ - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && /* && . != #xFEFF */ - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - // collapse the slice - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - // move the tokens down - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - // readjust the length - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// /* -// * Check if the character at the specified position is BOM. -// */ -// -func is_bom_at(b []byte, i int) bool { - return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF -} - -// -// #ifdef HAVE_CONFIG_H -// #include -// #endif -// -// #include "./yaml.h" -// -// #include -// #include -// -// /* -// * Memory management. -// */ -// -// yaml_DECLARE(void *) -// yaml_malloc(size_t size); -// -// yaml_DECLARE(void *) -// yaml_realloc(void *ptr, size_t size); -// -// yaml_DECLARE(void) -// yaml_free(void *ptr); -// -// yaml_DECLARE(yaml_char_t *) -// yaml_strdup(const yaml_char_t *); -// -// /* -// * Reader: Ensure that the buffer contains at least `length` characters. 
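insert_token above reuses slice capacity by sliding the live tokens down whenever the head offset has grown and the capacity is exhausted. The compaction step alone, on a plain int slice (compact is a hypothetical name):

func compact(tokens []int, head int) ([]int, int) {
    if head > 0 && len(tokens) == cap(tokens) {
        copy(tokens, tokens[head:])        // move live elements to the front
        tokens = tokens[:len(tokens)-head] // drop the consumed prefix
        head = 0
    }
    return tokens, head
}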
-// */ -// -// yaml_DECLARE(int) -// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length); -// -// /* -// * Scanner: Ensure that the token stack contains at least one token ready. -// */ -// -// yaml_DECLARE(int) -// yaml_parser_fetch_more_tokens(yaml_parser_t *parser); -// -// /* -// * The size of the input raw buffer. -// */ -// -// #define INPUT_RAW_BUFFER_SIZE 16384 -// -// /* -// * The size of the input buffer. -// * -// * It should be possible to decode the whole raw buffer. -// */ -// -// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3) -// -// /* -// * The size of the output buffer. -// */ -// -// #define OUTPUT_BUFFER_SIZE 16384 -// -// /* -// * The size of the output raw buffer. -// * -// * It should be possible to encode the whole output buffer. -// */ -// -// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2) -// -// /* -// * The size of other stacks and queues. -// */ -// -// #define INITIAL_STACK_SIZE 16 -// #define INITIAL_QUEUE_SIZE 16 -// #define INITIAL_STRING_SIZE 16 -// -// /* -// * Buffer management. -// */ -// -// #define BUFFER_INIT(context,buffer,size) \ -// (((buffer).start = yaml_malloc(size)) ? \ -// ((buffer).last = (buffer).pointer = (buffer).start, \ -// (buffer).end = (buffer).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define BUFFER_DEL(context,buffer) \ -// (yaml_free((buffer).start), \ -// (buffer).start = (buffer).pointer = (buffer).end = 0) -// -// /* -// * String management. -// */ -// -// typedef struct { -// yaml_char_t *start; -// yaml_char_t *end; -// yaml_char_t *pointer; -// } yaml_string_t; -// -// yaml_DECLARE(int) -// yaml_string_extend(yaml_char_t **start, -// yaml_char_t **pointer, yaml_char_t **end); -// -// yaml_DECLARE(int) -// yaml_string_join( -// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end, -// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end); -// -// #define NULL_STRING { NULL, NULL, NULL } -// -// #define STRING(string,length) { (string), (string)+(length), (string) } -// -// #define STRING_ASSIGN(value,string,length) \ -// ((value).start = (string), \ -// (value).end = (string)+(length), \ -// (value).pointer = (string)) -// -// #define STRING_INIT(context,string,size) \ -// (((string).start = yaml_malloc(size)) ? \ -// ((string).pointer = (string).start, \ -// (string).end = (string).start+(size), \ -// memset((string).start, 0, (size)), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STRING_DEL(context,string) \ -// (yaml_free((string).start), \ -// (string).start = (string).pointer = (string).end = 0) -// -// #define STRING_EXTEND(context,string) \ -// (((string).pointer+5 < (string).end) \ -// || yaml_string_extend(&(string).start, \ -// &(string).pointer, &(string).end)) -// -// #define CLEAR(context,string) \ -// ((string).pointer = (string).start, \ -// memset((string).start, 0, (string).end-(string).start)) -// -// #define JOIN(context,string_a,string_b) \ -// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \ -// &(string_a).end, &(string_b).start, \ -// &(string_b).pointer, &(string_b).end)) ? \ -// ((string_b).pointer = (string_b).start, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * String check operations. -// */ -// -// /* -// * Check the octet at the specified position. -// */ -// -// #define CHECK_AT(string,octet,offset) \ -// ((string).pointer[offset] == (yaml_char_t)(octet)) -// -// /* -// * Check the current octet in the buffer. 
-// */ -// -// #define CHECK(string,octet) CHECK_AT((string),(octet),0) -// -// /* -// * Check if the character at the specified position is an alphabetical -// * character, a digit, '_', or '-'. -// */ -// -// #define IS_ALPHA_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'Z') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'z') || \ -// (string).pointer[offset] == '_' || \ -// (string).pointer[offset] == '-') -// -// #define IS_ALPHA(string) IS_ALPHA_AT((string),0) -// -// /* -// * Check if the character at the specified position is a digit. -// */ -// -// #define IS_DIGIT_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9')) -// -// #define IS_DIGIT(string) IS_DIGIT_AT((string),0) -// -// /* -// * Get the value of a digit. -// */ -// -// #define AS_DIGIT_AT(string,offset) \ -// ((string).pointer[offset] - (yaml_char_t) '0') -// -// #define AS_DIGIT(string) AS_DIGIT_AT((string),0) -// -// /* -// * Check if the character at the specified position is a hex-digit. -// */ -// -// #define IS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) '0' && \ -// (string).pointer[offset] <= (yaml_char_t) '9') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') || \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f')) -// -// #define IS_HEX(string) IS_HEX_AT((string),0) -// -// /* -// * Get the value of a hex-digit. -// */ -// -// #define AS_HEX_AT(string,offset) \ -// (((string).pointer[offset] >= (yaml_char_t) 'A' && \ -// (string).pointer[offset] <= (yaml_char_t) 'F') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \ -// ((string).pointer[offset] >= (yaml_char_t) 'a' && \ -// (string).pointer[offset] <= (yaml_char_t) 'f') ? \ -// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \ -// ((string).pointer[offset] - (yaml_char_t) '0')) -// -// #define AS_HEX(string) AS_HEX_AT((string),0) -// -// /* -// * Check if the character is ASCII. -// */ -// -// #define IS_ASCII_AT(string,offset) \ -// ((string).pointer[offset] <= (yaml_char_t) '\x7F') -// -// #define IS_ASCII(string) IS_ASCII_AT((string),0) -// -// /* -// * Check if the character can be printed unescaped. -// */ -// -// #define IS_PRINTABLE_AT(string,offset) \ -// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \ -// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \ -// && (string).pointer[offset] <= 0x7E) \ -// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \ -// && (string).pointer[offset+1] >= 0xA0) \ -// || ((string).pointer[offset] > 0xC2 \ -// && (string).pointer[offset] < 0xED) \ -// || ((string).pointer[offset] == 0xED \ -// && (string).pointer[offset+1] < 0xA0) \ -// || ((string).pointer[offset] == 0xEE) \ -// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \ -// && !((string).pointer[offset+1] == 0xBB /* && . 
!= #xFEFF */ \ -// && (string).pointer[offset+2] == 0xBF) \ -// && !((string).pointer[offset+1] == 0xBF \ -// && ((string).pointer[offset+2] == 0xBE \ -// || (string).pointer[offset+2] == 0xBF)))) -// -// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0) -// -// /* -// * Check if the character at the specified position is NUL. -// */ -// -// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset)) -// -// #define IS_Z(string) IS_Z_AT((string),0) -// -// /* -// * Check if the character at the specified position is BOM. -// */ -// -// #define IS_BOM_AT(string,offset) \ -// (CHECK_AT((string),'\xEF',(offset)) \ -// && CHECK_AT((string),'\xBB',(offset)+1) \ -// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */ -// -// #define IS_BOM(string) IS_BOM_AT(string,0) -// -// /* -// * Check if the character at the specified position is space. -// */ -// -// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset)) -// -// #define IS_SPACE(string) IS_SPACE_AT((string),0) -// -// /* -// * Check if the character at the specified position is tab. -// */ -// -// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset)) -// -// #define IS_TAB(string) IS_TAB_AT((string),0) -// -// /* -// * Check if the character at the specified position is blank (space or tab). -// */ -// -// #define IS_BLANK_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset))) -// -// #define IS_BLANK(string) IS_BLANK_AT((string),0) -// -// /* -// * Check if the character at the specified position is a line break. -// */ -// -// #define IS_BREAK_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \ -// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \ -// || (CHECK_AT((string),'\xC2',(offset)) \ -// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \ -// || (CHECK_AT((string),'\xE2',(offset)) \ -// && CHECK_AT((string),'\x80',(offset)+1) \ -// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */ -// -// #define IS_BREAK(string) IS_BREAK_AT((string),0) -// -// #define IS_CRLF_AT(string,offset) \ -// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1)) -// -// #define IS_CRLF(string) IS_CRLF_AT((string),0) -// -// /* -// * Check if the character is a line break or NUL. -// */ -// -// #define IS_BREAKZ_AT(string,offset) \ -// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset))) -// -// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, or NUL. -// */ -// -// #define IS_SPACEZ_AT(string,offset) \ -// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0) -// -// /* -// * Check if the character is a line break, space, tab, or NUL. -// */ -// -// #define IS_BLANKZ_AT(string,offset) \ -// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset))) -// -// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0) -// -// /* -// * Determine the width of the character. -// */ -// -// #define WIDTH_AT(string,offset) \ -// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \ -// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \ -// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \ -// ((string).pointer[offset] & 0xF8) == 0xF0 ? 
4 : 0) -// -// #define WIDTH(string) WIDTH_AT((string),0) -// -// /* -// * Move the string pointer to the next character. -// */ -// -// #define MOVE(string) ((string).pointer += WIDTH((string))) -// -// /* -// * Copy a character and move the pointers of both strings. -// */ -// -// #define COPY(string_a,string_b) \ -// ((*(string_b).pointer & 0x80) == 0x00 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xE0) == 0xC0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF0) == 0xE0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : \ -// (*(string_b).pointer & 0xF8) == 0xF0 ? \ -// (*((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++), \ -// *((string_a).pointer++) = *((string_b).pointer++)) : 0) -// -// /* -// * Stack and queue management. -// */ -// -// yaml_DECLARE(int) -// yaml_stack_extend(void **start, void **top, void **end); -// -// yaml_DECLARE(int) -// yaml_queue_extend(void **start, void **head, void **tail, void **end); -// -// #define STACK_INIT(context,stack,size) \ -// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \ -// ((stack).top = (stack).start, \ -// (stack).end = (stack).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define STACK_DEL(context,stack) \ -// (yaml_free((stack).start), \ -// (stack).start = (stack).top = (stack).end = 0) -// -// #define STACK_EMPTY(context,stack) \ -// ((stack).start == (stack).top) -// -// #define PUSH(context,stack,value) \ -// (((stack).top != (stack).end \ -// || yaml_stack_extend((void **)&(stack).start, \ -// (void **)&(stack).top, (void **)&(stack).end)) ? \ -// (*((stack).top++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define POP(context,stack) \ -// (*(--(stack).top)) -// -// #define QUEUE_INIT(context,queue,size) \ -// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \ -// ((queue).head = (queue).tail = (queue).start, \ -// (queue).end = (queue).start+(size), \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define QUEUE_DEL(context,queue) \ -// (yaml_free((queue).start), \ -// (queue).start = (queue).head = (queue).tail = (queue).end = 0) -// -// #define QUEUE_EMPTY(context,queue) \ -// ((queue).head == (queue).tail) -// -// #define ENQUEUE(context,queue,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (*((queue).tail++) = value, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// #define DEQUEUE(context,queue) \ -// (*((queue).head++)) -// -// #define QUEUE_INSERT(context,queue,index,value) \ -// (((queue).tail != (queue).end \ -// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \ -// (void **)&(queue).tail, (void **)&(queue).end)) ? \ -// (memmove((queue).head+(index)+1,(queue).head+(index), \ -// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \ -// *((queue).head+(index)) = value, \ -// (queue).tail++, \ -// 1) : \ -// ((context)->error = yaml_MEMORY_ERROR, \ -// 0)) -// -// /* -// * Token initializers. 
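The stack and queue macros above grow their buffers by hand through yaml_stack_extend and yaml_queue_extend. A minimal Go sketch of the same operations using slices (illustrative only, not code from the removed file):

package main

import "fmt"

func main() {
	// Stack: PUSH is append; POP reads the top element and re-slices.
	var stack []int
	stack = append(stack, 1, 2, 3) // PUSH x3; append grows capacity as needed
	top := stack[len(stack)-1]     // POP reads the top...
	stack = stack[:len(stack)-1]   // ...and shrinks the slice
	fmt.Println(top, stack) // 3 [1 2]

	// Queue: ENQUEUE is append; DEQUEUE advances a head index,
	// mirroring the (queue).head pointer in the macros.
	queue := []string{"a", "b"}
	head := 0
	queue = append(queue, "c") // ENQUEUE
	first := queue[head]       // DEQUEUE
	head++
	fmt.Println(first, queue[head:]) // a [b c]
}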
-// */ -// -// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \ -// (memset(&(token), 0, sizeof(yaml_token_t)), \ -// (token).type = (token_type), \ -// (token).start_mark = (token_start_mark), \ -// (token).end_mark = (token_end_mark)) -// -// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \ -// (token).data.stream_start.encoding = (token_encoding)) -// -// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark))) -// -// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \ -// (token).data.alias.value = (token_value)) -// -// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.anchor.value = (token_value)) -// -// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag.handle = (token_handle), \ -// (token).data.tag.suffix = (token_suffix)) -// -// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \ -// (token).data.scalar.value = (token_value), \ -// (token).data.scalar.length = (token_length), \ -// (token).data.scalar.style = (token_style)) -// -// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.version_directive.major = (token_major), \ -// (token).data.version_directive.minor = (token_minor)) -// -// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \ -// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \ -// (token).data.tag_directive.handle = (token_handle), \ -// (token).data.tag_directive.prefix = (token_prefix)) -// -// /* -// * Event initializers. 
-// */ -// -// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \ -// (memset(&(event), 0, sizeof(yaml_event_t)), \ -// (event).type = (event_type), \ -// (event).start_mark = (event_start_mark), \ -// (event).end_mark = (event_end_mark)) -// -// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.stream_start.encoding = (event_encoding)) -// -// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark))) -// -// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \ -// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_start.version_directive = (event_version_directive), \ -// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \ -// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \ -// (event).data.document_start.implicit = (event_implicit)) -// -// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \ -// (event).data.document_end.implicit = (event_implicit)) -// -// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \ -// (event).data.alias.anchor = (event_anchor)) -// -// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \ -// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \ -// (event).data.scalar.anchor = (event_anchor), \ -// (event).data.scalar.tag = (event_tag), \ -// (event).data.scalar.value = (event_value), \ -// (event).data.scalar.length = (event_length), \ -// (event).data.scalar.plain_implicit = (event_plain_implicit), \ -// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \ -// (event).data.scalar.style = (event_style)) -// -// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.sequence_start.anchor = (event_anchor), \ -// (event).data.sequence_start.tag = (event_tag), \ -// (event).data.sequence_start.implicit = (event_implicit), \ -// (event).data.sequence_start.style = (event_style)) -// -// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark))) -// -// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \ -// event_implicit,event_style,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \ -// (event).data.mapping_start.anchor = (event_anchor), \ -// (event).data.mapping_start.tag = (event_tag), \ -// (event).data.mapping_start.implicit = (event_implicit), \ -// (event).data.mapping_start.style = (event_style)) -// -// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \ -// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark))) -// -// /* -// * Document initializer. 
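Each *_INIT macro above follows the same shape: zero the struct, set the type and the marks, then fill in the variant-specific fields. A compact Go rendering of that pattern (simplified stand-in types, not the vendored ones):

package main

import "fmt"

type yamlMark struct{ index, line, column int }

type yamlEvent struct {
	eventType string
	startMark yamlMark
	endMark   yamlMark
	anchor    []byte // the extra field an ALIAS_EVENT_INIT-style macro sets
}

// eventInit mirrors EVENT_INIT: clear everything, then set type and marks.
func eventInit(e *yamlEvent, t string, start, end yamlMark) {
	*e = yamlEvent{} // the memset(&(event), 0, ...) step
	e.eventType, e.startMark, e.endMark = t, start, end
}

func main() {
	var e yamlEvent
	eventInit(&e, "ALIAS", yamlMark{0, 1, 1}, yamlMark{6, 1, 7})
	e.anchor = []byte("anchor") // variant-specific field, as in the macros
	fmt.Printf("%s %s\n", e.eventType, e.anchor)
}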
-// */ -// -// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \ -// document_version_directive,document_tag_directives_start, \ -// document_tag_directives_end,document_start_implicit, \ -// document_end_implicit,document_start_mark,document_end_mark) \ -// (memset(&(document), 0, sizeof(yaml_document_t)), \ -// (document).nodes.start = (document_nodes_start), \ -// (document).nodes.end = (document_nodes_end), \ -// (document).nodes.top = (document_nodes_start), \ -// (document).version_directive = (document_version_directive), \ -// (document).tag_directives.start = (document_tag_directives_start), \ -// (document).tag_directives.end = (document_tag_directives_end), \ -// (document).start_implicit = (document_start_implicit), \ -// (document).end_implicit = (document_end_implicit), \ -// (document).start_mark = (document_start_mark), \ -// (document).end_mark = (document_end_mark)) -// -// /* -// * Node initializers. -// */ -// -// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \ -// (memset(&(node), 0, sizeof(yaml_node_t)), \ -// (node).type = (node_type), \ -// (node).tag = (node_tag), \ -// (node).start_mark = (node_start_mark), \ -// (node).end_mark = (node_end_mark)) -// -// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.scalar.value = (node_value), \ -// (node).data.scalar.length = (node_length), \ -// (node).data.scalar.style = (node_style)) -// -// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.sequence.items.start = (node_items_start), \ -// (node).data.sequence.items.end = (node_items_end), \ -// (node).data.sequence.items.top = (node_items_start), \ -// (node).data.sequence.style = (node_style)) -// -// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \ -// node_style,start_mark,end_mark) \ -// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \ -// (node).data.mapping.pairs.start = (node_pairs_start), \ -// (node).data.mapping.pairs.end = (node_pairs_end), \ -// (node).data.mapping.pairs.top = (node_pairs_start), \ -// (node).data.mapping.style = (node_style)) -// diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go deleted file mode 100644 index d608dbb36..000000000 --- a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go +++ /dev/null @@ -1,953 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package candiedyaml - -import ( - "fmt" - "io" -) - -/** The version directive data. */ -type yaml_version_directive_t struct { - major int // The major version number - minor int // The minor version number -} - -/** The tag directive data. 
*/ -type yaml_tag_directive_t struct { - handle []byte // The tag handle - prefix []byte // The tag prefix -} - -/** The stream encoding. */ -type yaml_encoding_t int - -const ( - /** Let the parser choose the encoding. */ - yaml_ANY_ENCODING yaml_encoding_t = iota - /** The defau lt UTF-8 encoding. */ - yaml_UTF8_ENCODING - /** The UTF-16-LE encoding with BOM. */ - yaml_UTF16LE_ENCODING - /** The UTF-16-BE encoding with BOM. */ - yaml_UTF16BE_ENCODING -) - -/** Line break types. */ -type yaml_break_t int - -const ( - yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */ - yaml_CR_BREAK /** Use CR for line breaks (Mac style). */ - yaml_LN_BREAK /** Use LN for line breaks (Unix style). */ - yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */ -) - -/** Many bad things could happen with the parser and emitter. */ -type YAML_error_type_t int - -const ( - /** No error is produced. */ - yaml_NO_ERROR YAML_error_type_t = iota - - /** Cannot allocate or reallocate a block of memory. */ - yaml_MEMORY_ERROR - - /** Cannot read or decode the input stream. */ - yaml_READER_ERROR - /** Cannot scan the input stream. */ - yaml_SCANNER_ERROR - /** Cannot parse the input stream. */ - yaml_PARSER_ERROR - /** Cannot compose a YAML document. */ - yaml_COMPOSER_ERROR - - /** Cannot write to the output stream. */ - yaml_WRITER_ERROR - /** Cannot emit a YAML stream. */ - yaml_EMITTER_ERROR -) - -/** The pointer position. */ -type YAML_mark_t struct { - /** The position index. */ - index int - - /** The position line. */ - line int - - /** The position column. */ - column int -} - -func (m YAML_mark_t) String() string { - return fmt.Sprintf("line %d, column %d", m.line, m.column) -} - -/** @} */ - -/** - * @defgroup styles Node Styles - * @{ - */ - -type yaml_style_t int - -/** Scalar styles. */ -type yaml_scalar_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota - - /** The plain scalar style. */ - yaml_PLAIN_SCALAR_STYLE - - /** The single-quoted scalar style. */ - yaml_SINGLE_QUOTED_SCALAR_STYLE - /** The double-quoted scalar style. */ - yaml_DOUBLE_QUOTED_SCALAR_STYLE - - /** The literal scalar style. */ - yaml_LITERAL_SCALAR_STYLE - /** The folded scalar style. */ - yaml_FOLDED_SCALAR_STYLE -) - -/** Sequence styles. */ -type yaml_sequence_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - /** The block sequence style. */ - yaml_BLOCK_SEQUENCE_STYLE - /** The flow sequence style. */ - yaml_FLOW_SEQUENCE_STYLE -) - -/** Mapping styles. */ -type yaml_mapping_style_t yaml_style_t - -const ( - /** Let the emitter choose the style. */ - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - /** The block mapping style. */ - yaml_BLOCK_MAPPING_STYLE - /** The flow mapping style. */ - yaml_FLOW_MAPPING_STYLE - -/* yaml_FLOW_SET_MAPPING_STYLE */ -) - -/** @} */ - -/** - * @defgroup tokens Tokens - * @{ - */ - -/** Token types. */ -type yaml_token_type_t int - -const ( - /** An empty token. */ - yaml_NO_TOKEN yaml_token_type_t = iota - - /** A STREAM-START token. */ - yaml_STREAM_START_TOKEN - /** A STREAM-END token. */ - yaml_STREAM_END_TOKEN - - /** A VERSION-DIRECTIVE token. */ - yaml_VERSION_DIRECTIVE_TOKEN - /** A TAG-DIRECTIVE token. */ - yaml_TAG_DIRECTIVE_TOKEN - /** A DOCUMENT-START token. */ - yaml_DOCUMENT_START_TOKEN - /** A DOCUMENT-END token. */ - yaml_DOCUMENT_END_TOKEN - - /** A BLOCK-SEQUENCE-START token. 
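As with yaml_encoding_t and yaml_break_t above, the file models C enums as typed ints with iota. The pattern in isolation, with a Stringer for readable output (names here are illustrative, not the vendored identifiers):

package main

import "fmt"

type encoding int

const (
	anyEncoding encoding = iota // let the parser choose
	utf8Encoding
	utf16LEEncoding
	utf16BEEncoding
)

func (e encoding) String() string {
	return [...]string{"any", "utf-8", "utf-16le", "utf-16be"}[e]
}

func main() { fmt.Println(utf16LEEncoding) } // utf-16le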
*/ - yaml_BLOCK_SEQUENCE_START_TOKEN - /** A BLOCK-SEQUENCE-END token. */ - yaml_BLOCK_MAPPING_START_TOKEN - /** A BLOCK-END token. */ - yaml_BLOCK_END_TOKEN - - /** A FLOW-SEQUENCE-START token. */ - yaml_FLOW_SEQUENCE_START_TOKEN - /** A FLOW-SEQUENCE-END token. */ - yaml_FLOW_SEQUENCE_END_TOKEN - /** A FLOW-MAPPING-START token. */ - yaml_FLOW_MAPPING_START_TOKEN - /** A FLOW-MAPPING-END token. */ - yaml_FLOW_MAPPING_END_TOKEN - - /** A BLOCK-ENTRY token. */ - yaml_BLOCK_ENTRY_TOKEN - /** A FLOW-ENTRY token. */ - yaml_FLOW_ENTRY_TOKEN - /** A KEY token. */ - yaml_KEY_TOKEN - /** A VALUE token. */ - yaml_VALUE_TOKEN - - /** An ALIAS token. */ - yaml_ALIAS_TOKEN - /** An ANCHOR token. */ - yaml_ANCHOR_TOKEN - /** A TAG token. */ - yaml_TAG_TOKEN - /** A SCALAR token. */ - yaml_SCALAR_TOKEN -) - -/** The token structure. */ -type yaml_token_t struct { - - /** The token type. */ - token_type yaml_token_type_t - - /** The token data. */ - /** The stream start (for @c yaml_STREAM_START_TOKEN). */ - encoding yaml_encoding_t - - /** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */ - /** The anchor (for @c ). */ - /** The scalar value (for @c ). */ - value []byte - - /** The tag suffix. */ - suffix []byte - - /** The scalar value (for @c yaml_SCALAR_TOKEN). */ - /** The scalar style. */ - style yaml_scalar_style_t - - /** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */ - version_directive yaml_version_directive_t - - /** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */ - prefix []byte - - /** The beginning of the token. */ - start_mark YAML_mark_t - /** The end of the token. */ - end_mark YAML_mark_t - - major, minor int -} - -/** - * @defgroup events Events - * @{ - */ - -/** Event types. */ -type yaml_event_type_t int - -const ( - /** An empty event. */ - yaml_NO_EVENT yaml_event_type_t = iota - - /** A STREAM-START event. */ - yaml_STREAM_START_EVENT - /** A STREAM-END event. */ - yaml_STREAM_END_EVENT - - /** A DOCUMENT-START event. */ - yaml_DOCUMENT_START_EVENT - /** A DOCUMENT-END event. */ - yaml_DOCUMENT_END_EVENT - - /** An ALIAS event. */ - yaml_ALIAS_EVENT - /** A SCALAR event. */ - yaml_SCALAR_EVENT - - /** A SEQUENCE-START event. */ - yaml_SEQUENCE_START_EVENT - /** A SEQUENCE-END event. */ - yaml_SEQUENCE_END_EVENT - - /** A MAPPING-START event. */ - yaml_MAPPING_START_EVENT - /** A MAPPING-END event. */ - yaml_MAPPING_END_EVENT -) - -/** The event structure. */ -type yaml_event_t struct { - - /** The event type. */ - event_type yaml_event_type_t - - /** The stream parameters (for @c yaml_STREAM_START_EVENT). */ - encoding yaml_encoding_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */ - version_directive *yaml_version_directive_t - - /** The beginning and end of the tag directives list. */ - tag_directives []yaml_tag_directive_t - - /** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */ - /** Is the document indicator implicit? */ - implicit bool - - /** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The anchor. */ - anchor []byte - - /** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The tag. */ - tag []byte - /** The scalar value. */ - value []byte - - /** Is the tag optional for the plain style? 
*/ - plain_implicit bool - /** Is the tag optional for any non-plain style? */ - quoted_implicit bool - - /** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */ - /** The sequence style. */ - /** The scalar style. */ - style yaml_style_t - - /** The beginning of the event. */ - start_mark, end_mark YAML_mark_t -} - -/** - * @defgroup nodes Nodes - * @{ - */ - -const ( - /** The tag @c !!null with the only possible value: @c null. */ - yaml_NULL_TAG = "tag:yaml.org,2002:null" - /** The tag @c !!bool with the values: @c true and @c falce. */ - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" - /** The tag @c !!str for string values. */ - yaml_STR_TAG = "tag:yaml.org,2002:str" - /** The tag @c !!int for integer values. */ - yaml_INT_TAG = "tag:yaml.org,2002:int" - /** The tag @c !!float for float values. */ - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" - /** The tag @c !!timestamp for date and time values. */ - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" - - /** The tag @c !!seq is used to denote sequences. */ - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" - /** The tag @c !!map is used to denote mapping. */ - yaml_MAP_TAG = "tag:yaml.org,2002:map" - - /** The default scalar tag is @c !!str. */ - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG - /** The default sequence tag is @c !!seq. */ - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG - /** The default mapping tag is @c !!map. */ - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG - - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" -) - -/** Node types. */ -type yaml_node_type_t int - -const ( - /** An empty node. */ - yaml_NO_NODE yaml_node_type_t = iota - - /** A scalar node. */ - yaml_SCALAR_NODE - /** A sequence node. */ - yaml_SEQUENCE_NODE - /** A mapping node. */ - yaml_MAPPING_NODE -) - -/** An element of a sequence node. */ -type yaml_node_item_t int - -/** An element of a mapping node. */ -type yaml_node_pair_t struct { - /** The key of the element. */ - key int - /** The value of the element. */ - value int -} - -/** The node structure. */ -type yaml_node_t struct { - - /** The node type. */ - node_type yaml_node_type_t - - /** The node tag. */ - tag []byte - - /** The scalar parameters (for @c yaml_SCALAR_NODE). */ - scalar struct { - /** The scalar value. */ - value []byte - /** The scalar style. */ - style yaml_scalar_style_t - } - - /** The sequence parameters (for @c yaml_SEQUENCE_NODE). */ - sequence struct { - /** The stack of sequence items. */ - items []yaml_node_item_t - /** The sequence style. */ - style yaml_sequence_style_t - } - - /** The mapping parameters (for @c yaml_MAPPING_NODE). */ - mapping struct { - /** The stack of mapping pairs (key, value). */ - pairs []yaml_node_pair_t - /** The mapping style. */ - style yaml_mapping_style_t - } - - /** The beginning of the node. */ - start_mark YAML_mark_t - /** The end of the node. */ - end_mark YAML_mark_t -} - -/** The document structure. */ -type yaml_document_t struct { - - /** The document nodes. */ - nodes []yaml_node_t - - /** The version directive. */ - version_directive *yaml_version_directive_t - - /** The list of tag directives. */ - tags []yaml_tag_directive_t - - /** Is the document start indicator implicit? */ - start_implicit bool - /** Is the document end indicator implicit? */ - end_implicit bool - - /** The beginning of the document. */ - start_mark YAML_mark_t - /** The end of the document. */ - end_mark YAML_mark_t -} - -/** - * The prototype of a read handler. 
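The yaml_NULL_TAG through yaml_MAP_TAG constants above are the standard YAML core schema tag URIs. A toy resolver showing how a Go value might map onto them; the real mapping lives in resolver.go, which this patch also deletes, so this is only a sketch:

package main

import "fmt"

const (
	nullTag = "tag:yaml.org,2002:null"
	boolTag = "tag:yaml.org,2002:bool"
	strTag  = "tag:yaml.org,2002:str"
	intTag  = "tag:yaml.org,2002:int"
)

// resolveTag picks a core schema tag for a plain Go value.
func resolveTag(v interface{}) string {
	switch v.(type) {
	case nil:
		return nullTag
	case bool:
		return boolTag
	case int, int64:
		return intTag
	default:
		return strTag
	}
}

func main() {
	fmt.Println(resolveTag(nil), resolveTag(true), resolveTag(42), resolveTag("x"))
}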
- * - * The read handler is called when the parser needs to read more bytes from the - * source. The handler should write not more than @a size bytes to the @a - * buffer. The number of written bytes should be set to the @a length variable. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_parser_set_input(). - * @param[out] buffer The buffer to write the data from the source. - * @param[in] size The size of the buffer. - * @param[out] size_read The actual number of bytes read from the source. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. On EOF, the handler should set the - * @a size_read to @c 0 and return @c 1. - */ - -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -/** - * This structure holds information about a potential simple key. - */ - -type yaml_simple_key_t struct { - /** Is a simple key possible? */ - possible bool - - /** Is a simple key required? */ - required bool - - /** The number of the token. */ - token_number int - - /** The position mark. */ - mark YAML_mark_t -} - -/** - * The states of the parser. - */ -type yaml_parser_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - /** Expect the beginning of an implicit document. */ - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - /** Expect DOCUMENT-START. */ - yaml_PARSE_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_PARSE_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_PARSE_DOCUMENT_END_STATE - /** Expect a block node. */ - yaml_PARSE_BLOCK_NODE_STATE - /** Expect a block node or indentless sequence. */ - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE - /** Expect a flow node. */ - yaml_PARSE_FLOW_NODE_STATE - /** Expect the first entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a block sequence. */ - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - /** Expect an entry of an indentless sequence. */ - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - /** Expect the first key of a block mapping. */ - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect a block mapping key. */ - yaml_PARSE_BLOCK_MAPPING_KEY_STATE - /** Expect a block mapping value. */ - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - /** Expect the first entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - /** Expect an entry of a flow sequence. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - /** Expect a key of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - /** Expect a value of an ordered mapping. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - /** Expect the and of an ordered mapping entry. */ - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - /** Expect the first key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_KEY_STATE - /** Expect a value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_VALUE_STATE - /** Expect an empty value of a flow mapping. */ - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE - /** Expect nothing. */ - yaml_PARSE_END_STATE -) - -/** - * This structure holds aliases data. - */ - -type yaml_alias_data_t struct { - /** The anchor. */ - anchor []byte - /** The node id. */ - index int - /** The anchor mark. */ - mark YAML_mark_t -} - -/** - * The parser structure. - * - * All members are internal. 
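The yaml_read_handler_t prototype above is effectively io.Reader's contract expressed as a callback, which is why the parser struct below can also hold an input_reader directly. A small adapter making that equivalence concrete (the helper name is hypothetical, not vendored code):

package main

import (
	"fmt"
	"io"
	"strings"
)

type readHandler func(buffer []byte) (n int, err error)

// fromReader adapts any io.Reader into a handler of that shape.
func fromReader(r io.Reader) readHandler {
	return func(buffer []byte) (int, error) { return r.Read(buffer) }
}

func main() {
	h := fromReader(strings.NewReader("a: 1\n"))
	buf := make([]byte, 8)
	n, err := h(buf)
	fmt.Printf("%d %q %v\n", n, buf[:n], err) // 5 "a: 1\n" <nil>
}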
Manage the structure using the @c yaml_parser_ - * family of functions. - */ - -type yaml_parser_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - /** The byte about which the problem occured. */ - problem_offset int - /** The problematic value (@c -1 is none). */ - problem_value int - /** The problem position. */ - problem_mark YAML_mark_t - /** The error context. */ - context string - /** The context position. */ - context_mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Reader stuff - * @{ - */ - - /** Read handler. */ - read_handler yaml_read_handler_t - - /** Reader input data. */ - input_reader io.Reader - input []byte - input_pos int - - /** EOF flag */ - eof bool - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /* The number of unread characters in the buffer. */ - unread int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The input encoding. */ - encoding yaml_encoding_t - - /** The offset of the current position (in bytes). */ - offset int - - /** The mark of the current position. */ - mark YAML_mark_t - - /** - * @} - */ - - /** - * @name Scanner stuff - * @{ - */ - - /** Have we started to scan the input stream? */ - stream_start_produced bool - - /** Have we reached the end of the input stream? */ - stream_end_produced bool - - /** The number of unclosed '[' and '{' indicators. */ - flow_level int - - /** The tokens queue. */ - tokens []yaml_token_t - tokens_head int - - /** The number of tokens fetched from the queue. */ - tokens_parsed int - - /* Does the tokens queue contain a token ready for dequeueing. */ - token_available bool - - /** The indentation levels stack. */ - indents []int - - /** The current indentation level. */ - indent int - - /** May a simple key occur at the current position? */ - simple_key_allowed bool - - /** The stack of simple keys. */ - simple_keys []yaml_simple_key_t - - /** - * @} - */ - - /** - * @name Parser stuff - * @{ - */ - - /** The parser states stack. */ - states []yaml_parser_state_t - - /** The current parser state. */ - state yaml_parser_state_t - - /** The stack of marks. */ - marks []YAML_mark_t - - /** The list of TAG directives. */ - tag_directives []yaml_tag_directive_t - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** The alias data. */ - aliases []yaml_alias_data_t - - /** The currently parsed document. */ - document *yaml_document_t - - /** - * @} - */ - -} - -/** - * The prototype of a write handler. - * - * The write handler is called when the emitter needs to flush the accumulated - * characters to the output. The handler should write @a size bytes of the - * @a buffer to the output. - * - * @param[in,out] data A pointer to an application data specified by - * yaml_emitter_set_output(). - * @param[in] buffer The buffer with bytes to be written. - * @param[in] size The size of the buffer. - * - * @returns On success, the handler should return @c 1. If the handler failed, - * the returned value should be @c 0. - */ - -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -/** The emitter states. */ -type yaml_emitter_state_t int - -const ( - /** Expect STREAM-START. */ - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - /** Expect the first DOCUMENT-START or STREAM-END. */ - yaml_EMIT_FIRST_DOCUMENT_START_STATE - /** Expect DOCUMENT-START or STREAM-END. 
*/ - yaml_EMIT_DOCUMENT_START_STATE - /** Expect the content of a document. */ - yaml_EMIT_DOCUMENT_CONTENT_STATE - /** Expect DOCUMENT-END. */ - yaml_EMIT_DOCUMENT_END_STATE - /** Expect the first item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a flow sequence. */ - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE - /** Expect the first key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - /** Expect a key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_KEY_STATE - /** Expect a value for a simple key of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a flow mapping. */ - yaml_EMIT_FLOW_MAPPING_VALUE_STATE - /** Expect the first item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - /** Expect an item of a block sequence. */ - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE - /** Expect the first key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - /** Expect the key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_KEY_STATE - /** Expect a value for a simple key of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE - /** Expect a value of a block mapping. */ - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE - /** Expect nothing. */ - yaml_EMIT_END_STATE -) - -/** - * The emitter structure. - * - * All members are internal. Manage the structure using the @c yaml_emitter_ - * family of functions. - */ - -type yaml_emitter_t struct { - - /** - * @name Error handling - * @{ - */ - - /** Error type. */ - error YAML_error_type_t - /** Error description. */ - problem string - - /** - * @} - */ - - /** - * @name Writer stuff - * @{ - */ - - /** Write handler. */ - write_handler yaml_write_handler_t - - /** Standard (string or file) output data. */ - output_buffer *[]byte - output_writer io.Writer - - /** The working buffer. */ - buffer []byte - buffer_pos int - - /** The raw buffer. */ - raw_buffer []byte - raw_buffer_pos int - - /** The stream encoding. */ - encoding yaml_encoding_t - - /** - * @} - */ - - /** - * @name Emitter stuff - * @{ - */ - - /** If the output is in the canonical style? */ - canonical bool - /** The number of indentation spaces. */ - best_indent int - /** The preferred width of the output lines. */ - best_width int - /** Allow unescaped non-ASCII characters? */ - unicode bool - /** The preferred line break. */ - line_break yaml_break_t - - /** The stack of states. */ - states []yaml_emitter_state_t - - /** The current emitter state. */ - state yaml_emitter_state_t - - /** The event queue. */ - events []yaml_event_t - events_head int - - /** The stack of indentation levels. */ - indents []int - - /** The list of tag directives. */ - tag_directives []yaml_tag_directive_t - - /** The current indentation level. */ - indent int - - /** The current flow level. */ - flow_level int - - /** Is it the document root context? */ - root_context bool - /** Is it a sequence context? */ - sequence_context bool - /** Is it a mapping context? */ - mapping_context bool - /** Is it a simple mapping key context? */ - simple_key_context bool - - /** The current line. */ - line int - /** The current column. */ - column int - /** If the last character was a whitespace? */ - whitespace bool - /** If the last character was an indentation character (' ', '-', '?', ':')? */ - indention bool - /** If an explicit document end is required? */ - open_ended bool - - /** Anchor analysis. */ - anchor_data struct { - /** The anchor value. */ - anchor []byte - /** Is it an alias? 
*/ - alias bool - } - - /** Tag analysis. */ - tag_data struct { - /** The tag handle. */ - handle []byte - /** The tag suffix. */ - suffix []byte - } - - /** Scalar analysis. */ - scalar_data struct { - /** The scalar value. */ - value []byte - /** Does the scalar contain line breaks? */ - multiline bool - /** Can the scalar be expessed in the flow plain style? */ - flow_plain_allowed bool - /** Can the scalar be expressed in the block plain style? */ - block_plain_allowed bool - /** Can the scalar be expressed in the single quoted style? */ - single_quoted_allowed bool - /** Can the scalar be expressed in the literal or folded styles? */ - block_allowed bool - /** The output style. */ - style yaml_scalar_style_t - } - - /** - * @} - */ - - /** - * @name Dumper stuff - * @{ - */ - - /** If the stream was already opened? */ - opened bool - /** If the stream was already closed? */ - closed bool - - /** The information associated with the document nodes. */ - anchors *struct { - /** The number of references. */ - references int - /** The anchor id. */ - anchor int - /** If the node has been emitted? */ - serialized bool - } - - /** The last assigned anchor id. */ - last_anchor_id int - - /** The currently emitted document. */ - document *yaml_document_t - - /** - * @} - */ - -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. 
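ErrorCode.Error above derives the human-readable form by lowercasing the canonical identifier and replacing underscores with spaces. That transformation in isolation (assuming the conventional NAME_UNKNOWN identifier from the v2 API package as input):

package main

import (
	"fmt"
	"strings"
)

func main() {
	id := "NAME_UNKNOWN" // a registry error identifier
	msg := strings.ToLower(strings.Replace(id, "_", " ", -1))
	fmt.Println(msg) // name unknown
}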
-func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. 
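WithArgs above treats the descriptor's Message as a printf format and defers substitution until the arguments are known. The same idea in miniature (the code and message text here are made up for illustration):

package main

import "fmt"

type regError struct {
	code    string
	message string // printf-style format, as in ErrorDescriptor.Message
	args    []interface{}
}

// Error substitutes the args into the message at render time.
func (e regError) Error() string {
	return fmt.Sprintf("%s: %s", e.code, fmt.Sprintf(e.message, e.args...))
}

func main() {
	err := regError{"MANIFEST_UNKNOWN", "manifest unknown: %s", []interface{}{"latest"}}
	fmt.Println(err) // MANIFEST_UNKNOWN: manifest unknown: latest
}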
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index 49a64a86e..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,44 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. 
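MarshalJSON above normalizes every element into the v2 registry's {"errors": [...]} envelope. A self-contained sketch of that wire format:

package main

import (
	"encoding/json"
	"fmt"
)

type wireError struct {
	Code    string      `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`
}

func main() {
	env := struct {
		Errors []wireError `json:"errors,omitempty"`
	}{[]wireError{{Code: "UNKNOWN", Message: "unknown error"}}}
	b, _ := json.Marshal(env)
	fmt.Println(string(b)) // {"errors":[{"code":"UNKNOWN","message":"unknown error"}]}
}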
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
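ServeJSON above resolves the HTTP status from the first error's descriptor and falls back to 500. An imitation using only the standard library (the status table below is a stand-in for the descriptor registry):

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

var statusFor = map[string]int{
	"UNAUTHORIZED": http.StatusUnauthorized,
	"DENIED":       http.StatusForbidden,
}

func serveJSON(w http.ResponseWriter, code, message string) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	sc, ok := statusFor[code]
	if !ok {
		sc = http.StatusInternalServerError // the same fallback ServeJSON uses
	}
	w.WriteHeader(sc)
	json.NewEncoder(w).Encode(map[string]interface{}{
		"errors": []map[string]string{{"code": code, "message": message}},
	})
}

func main() {
	rec := httptest.NewRecorder()
	serveJSON(rec, "DENIED", "requested access to the resource is denied")
	fmt.Println(rec.Code, rec.Body.String()) // 403 and the JSON envelope
}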
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index 9979abae6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
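Register above hands out monotonically increasing codes under a mutex and panics on duplicate values, so registration is meant to happen once, at package init. The core of that pattern, reduced (illustrative, not the vendored implementation):

package main

import (
	"fmt"
	"sync"
)

var (
	mu       sync.Mutex
	nextCode = 1000
	byValue  = map[string]int{}
)

// register assigns the next code to value, refusing duplicates.
func register(value string) int {
	mu.Lock()
	defer mu.Unlock()
	if _, dup := byValue[value]; dup {
		panic(fmt.Sprintf("%q is already registered", value))
	}
	byValue[value] = nextCode
	nextCode++
	return byValue[value]
}

func main() {
	fmt.Println(register("UNKNOWN"), register("UNSUPPORTED")) // 1000 1001
}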
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde011959..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
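The catalog route above documents the `n`/`last` pagination contract. For orientation, a hedged sketch of a client paging through it; the registry address is a placeholder, and error handling is reduced to panics for brevity.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/url"
    )

    type catalogPage struct {
        Repositories []string `json:"repositories"`
    }

    func main() {
        base := "http://localhost:5000/v2/_catalog" // placeholder registry
        last := ""

        for {
            u, err := url.Parse(base)
            if err != nil {
                panic(err)
            }
            q := u.Query()
            q.Set("n", "100")
            if last != "" {
                q.Set("last", last)
            }
            u.RawQuery = q.Encode()

            resp, err := http.Get(u.String())
            if err != nil {
                panic(err)
            }
            // Real code should also check resp.StatusCode and follow the
            // Link header rather than relying on an empty page.
            var page catalogPage
            err = json.NewDecoder(resp.Body).Decode(&page)
            resp.Body.Close()
            if err != nil {
                panic(err)
            }

            if len(page.Repositories) == 0 {
                break
            }
            for _, repo := range page.Repositories {
                fmt.Println(repo)
            }
            last = page.Repositories[len(page.Repositories)-1]
        }
    }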
-package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
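Every error code in this file is produced by the same `errcode.Register(errGroup, ...)` call. A simplified stand-in for that registration mechanism follows, to show the shape of what the vendored errcode package does; it is not the real package, just a sketch of the pattern (one registration per group/value, duplicates are a programming error).

    package main

    import (
        "fmt"
        "net/http"
    )

    // errorDescriptor is a cut-down stand-in for errcode.ErrorDescriptor.
    type errorDescriptor struct {
        Value          string
        Message        string
        HTTPStatusCode int
    }

    var registered = map[string]errorDescriptor{}

    // register mimics the errcode.Register contract: panic on duplicate
    // registration, return the descriptor for use as a package-level var.
    func register(group string, d errorDescriptor) errorDescriptor {
        key := group + ":" + d.Value
        if _, exists := registered[key]; exists {
            panic(fmt.Sprintf("descriptor %s registered twice", key))
        }
        registered[key] = d
        return d
    }

    func main() {
        nameUnknown := register("registry.api.v2", errorDescriptor{
            Value:          "NAME_UNKNOWN",
            Message:        "repository name not known to registry",
            HTTPStatusCode: http.StatusNotFound,
        })
        fmt.Println(nameUnknown.Value, nameUnknown.HTTPStatusCode)
    }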
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a6..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. The -// function parses only the first element of the list, which is set by the very first proxy. 
It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case 
stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 5b80d5be7..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,49 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index 5e24ca9b5..000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,263 +0,0 @@ -package v2 - -import ( - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. 
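Router() above simply registers each descriptor's path under its name with gorilla/mux, and the URLBuilder below resolves those named routes back into URLs. A minimal sketch of that round trip, assuming the github.com/gorilla/mux module is available (it is a dependency of the vendored code):

    package main

    import (
        "fmt"

        "github.com/gorilla/mux"
    )

    func main() {
        r := mux.NewRouter()
        // Register a path under a name, as Router() does for every
        // route descriptor.
        r.Path("/v2/{name}/tags/list").Name("tags")

        // Named routes can be resolved back into URLs.
        u, err := r.GetRoute("tags").URL("name", "ubuntu")
        if err != nil {
            panic(err)
        }
        fmt.Println(u.String()) // /v2/ubuntu/tags/list
    }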
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) - - if r.TLS != nil { - scheme = "https" - } else if len(r.URL.Scheme) > 0 { - scheme = r.URL.Scheme - } - - // Handle fowarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. 
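NewURLBuilderFromRequest above derives the externally visible scheme and host from proxy headers. The same precedence, reduced to its essentials in a standalone sketch; the RFC 7239 `Forwarded` parsing is omitted for brevity, and `externalSchemeHost` is an invented helper, not part of the vendored API.

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    // externalSchemeHost applies the precedence used above: the request
    // itself, then X-Forwarded-Proto / X-Forwarded-Host overrides.
    func externalSchemeHost(r *http.Request) (scheme, host string) {
        scheme, host = "http", r.Host
        if r.TLS != nil {
            scheme = "https"
        }
        if p := r.Header.Get("X-Forwarded-Proto"); p != "" {
            scheme = p
        }
        if h := r.Header.Get("X-Forwarded-Host"); h != "" {
            // May be a comma-separated list; the first entry is the
            // host the client actually asked for.
            host = strings.TrimSpace(strings.SplitN(h, ",", 2)[0])
        }
        return scheme, host
    }

    func main() {
        req, _ := http.NewRequest("GET", "http://10.0.0.5/v2/", nil)
        req.Host = "10.0.0.5"
        req.Header.Set("X-Forwarded-Proto", "https")
        req.Header.Set("X-Forwarded-Host", "registry.example.org, 10.0.0.5")

        s, h := externalSchemeHost(req)
        fmt.Printf("%s://%s\n", s, h) // https://registry.example.org
    }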
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe165..000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. 
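appendValuesURL above is the small helper every query-carrying builder shares. A standalone demonstration of its behavior, with the function body copied verbatim into a runnable program; the registry host is a placeholder.

    package main

    import (
        "fmt"
        "net/url"
    )

    // appendValuesURL merges the given url.Values into the URL's query
    // string, preserving any parameters already present.
    func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
        merged := u.Query()
        for _, v := range values {
            for k, vv := range v {
                merged[k] = append(merged[k], vv...)
            }
        }
        u.RawQuery = merged.Encode()
        return u
    }

    func main() {
        u, err := url.Parse("https://registry.example.org/v2/foo/blobs/uploads/")
        if err != nil {
            panic(err)
        }
        appendValuesURL(u, url.Values{"digest": []string{"sha256:abc"}})
        fmt.Println(u.String()) // ...?digest=sha256%3Aabc
    }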
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index c9bdfc355..000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleManager() Manager { - return &simpleManager{ - Challanges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challanges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challanges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challanges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. 
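The simple manager stores whatever challenges parseAuthHeader extracts from WWW-Authenticate responses. For orientation, a naive parse of a typical Bearer challenge; unlike the full parser below, this sketch assumes a well-formed header with no commas inside quoted values.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        h := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`

        // Split the auth-scheme from its parameters.
        parts := strings.SplitN(h, " ", 2)
        scheme, rest := parts[0], parts[1]

        // Naively split "key=value" pairs on commas; the real parser
        // handles quoting, escaping, and embedded commas.
        params := map[string]string{}
        for _, kv := range strings.Split(rest, ",") {
            pair := strings.SplitN(kv, "=", 2)
            if len(pair) != 2 {
                continue
            }
            key := strings.ToLower(strings.TrimSpace(pair[0]))
            params[key] = strings.Trim(pair[1], `"`)
        }

        fmt.Println(scheme, params["realm"], params["service"])
    }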
-// Octet types from RFC 2616.
-type octetType byte
-
-var octetTypes [256]octetType
-
-const (
- isToken octetType = 1 << iota
- isSpace
-)
-
-func init() {
- // OCTET      = <any 8-bit sequence of data>
- // CHAR       = <any US-ASCII character (octets 0 - 127)>
- // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
- // CR         = <US-ASCII CR, carriage return (13)>
- // LF         = <US-ASCII LF, linefeed (10)>
- // SP         = <US-ASCII SP, space (32)>
- // HT         = <US-ASCII HT, horizontal-tab (9)>
- // <">        = <US-ASCII double-quote mark (34)>
- // CRLF       = CR LF
- // LWS        = [CRLF] 1*( SP | HT )
- // TEXT       = <any OCTET except CTLs, but including LWS>
- // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
- //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
- // token      = 1*<any CHAR except CTLs or separators>
- // qdtext     = <any TEXT except <">>
-
- for c := 0; c < 256; c++ {
- var t octetType
- isCtl := c <= 31 || c == 127
- isChar := 0 <= c && c <= 127
- isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
- if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
- t |= isSpace
- }
- if isChar && !isCtl && !isSeparator {
- t |= isToken
- }
- octetTypes[c] = t
- }
-}
-
-// ResponseChallenges returns a list of authorization challenges
-// for the given http Response. Challenges are only checked if
-// the response status code was a 401.
-func ResponseChallenges(resp *http.Response) []Challenge {
- if resp.StatusCode == http.StatusUnauthorized {
- // Parse the WWW-Authenticate Header and store the challenges
- // on this endpoint object.
- return parseAuthHeader(resp.Header)
- }
-
- return nil
-}
-
-func parseAuthHeader(header http.Header) []Challenge {
- challenges := []Challenge{}
- for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
- v, p := parseValueAndParams(h)
- if v != "" {
- challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
- }
- }
- return challenges
-}
-
-func parseValueAndParams(header string) (value string, params map[string]string) {
- params = make(map[string]string)
- value, s := expectToken(header)
- if value == "" {
- return
- }
- value = strings.ToLower(value)
- s = "," + skipSpace(s)
- for strings.HasPrefix(s, ",") {
- var pkey string
- pkey, s = expectToken(skipSpace(s[1:]))
- if pkey == "" {
- return
- }
- if !strings.HasPrefix(s, "=") {
- return
- }
- var pvalue string
- pvalue, s = expectTokenOrQuoted(s[1:])
- if pvalue == "" {
- return
- }
- pkey = strings.ToLower(pkey)
- params[pkey] = pvalue
- s = skipSpace(s)
- }
- return
-}
-
-func skipSpace(s string) (rest string) {
- i := 0
- for ; i < len(s); i++ {
- if octetTypes[s[i]]&isSpace == 0 {
- break
- }
- }
- return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
- i := 0
- for ; i < len(s); i++ {
- if octetTypes[s[i]]&isToken == 0 {
- break
- }
- }
- return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
- if !strings.HasPrefix(s, "\"") {
- return expectToken(s)
- }
- s = s[1:]
- for i := 0; i < len(s); i++ {
- switch s[i] {
- case '"':
- return s[:i], s[i+1:]
- case '\\':
- p := make([]byte, len(s)-1)
- j := copy(p, s[:i])
- escape := true
- for i = i + 1; i < len(s); i++ {
- b := s[i]
- switch {
- case escape:
- escape = false
- p[j] = b
- j++
- case b == '\\':
- escape = true
- case b == '"':
- return string(p[:j]), s[i+1:]
- default:
- p[j] = b
- j++
- }
- }
- return "", ""
- }
- }
- return "", ""
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go
deleted file mode 100644
index e3ffcb00f..000000000
--- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package client
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
-)
-
-type httpBlobUpload struct {
- statter
distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || 
SuccessStatus(resp.StatusCode) {
- return nil
- }
- return hbu.handleErrorResponse(resp)
-}
-
-func (hbu *httpBlobUpload) Close() error {
- hbu.closed = true
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go
deleted file mode 100644
index 52d49d5d2..000000000
--- a/vendor/github.com/docker/distribution/registry/client/errors.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/distribution/registry/api/errcode"
- "github.com/docker/distribution/registry/client/auth/challenge"
-)
-
-// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
-// errcode.Errors slice.
-var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
-
-// UnexpectedHTTPStatusError is returned when a registry API call returns an
-// unexpected HTTP status.
-type UnexpectedHTTPStatusError struct {
- Status string
-}
-
-func (e *UnexpectedHTTPStatusError) Error() string {
- return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
-}
-
-// UnexpectedHTTPResponseError is returned when an expected HTTP status code
-// is returned, but the content was unexpected and failed to be parsed.
-type UnexpectedHTTPResponseError struct {
- ParseErr error
- StatusCode int
- Response []byte
-}
-
-func (e *UnexpectedHTTPResponseError) Error() string {
- return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
-}
-
-func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
- var errors errcode.Errors
- body, err := ioutil.ReadAll(r)
- if err != nil {
- return err
- }
-
- // For backward compatibility, handle irregularly formatted
- // messages that contain a "details" field.
- var detailsErr struct {
- Details string `json:"details"`
- }
- err = json.Unmarshal(body, &detailsErr)
- if err == nil && detailsErr.Details != "" {
- switch statusCode {
- case http.StatusUnauthorized:
- return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
- case http.StatusTooManyRequests:
- return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
- default:
- return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
- }
- }
-
- if err := json.Unmarshal(body, &errors); err != nil {
- return &UnexpectedHTTPResponseError{
- ParseErr: err,
- StatusCode: statusCode,
- Response: body,
- }
- }
-
- if len(errors) == 0 {
- // If there was no error specified in the body, return
- // UnexpectedHTTPResponseError.
- return &UnexpectedHTTPResponseError{
- ParseErr: ErrNoErrorsInBody,
- StatusCode: statusCode,
- Response: body,
- }
- }
-
- return errors
-}
-
-func makeErrorList(err error) []error {
- if errL, ok := err.(errcode.Errors); ok {
- return []error(errL)
- }
- return []error{err}
-}
-
-func mergeErrors(err1, err2 error) error {
- return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
-}
-
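Before the function itself, a hedged sketch of what callers get back from HandleErrorResponse for a well-formed registry error body (import paths are the vendored ones removed here; the payload is illustrative):

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"

    "github.com/docker/distribution/registry/api/errcode"
    "github.com/docker/distribution/registry/client"
)

func main() {
    // A synthetic 401 whose body follows the errcode JSON convention;
    // UNAUTHORIZED is one of the codes the errcode package registers.
    body := `{"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}`
    resp := &http.Response{
        StatusCode: http.StatusUnauthorized,
        Body:       ioutil.NopCloser(strings.NewReader(body)),
    }

    err := client.HandleErrorResponse(resp)
    // A parseable body comes back as errcode.Errors, one element per error.
    if errs, ok := err.(errcode.Errors); ok {
        for _, e := range errs {
            fmt.Println(e) // roughly: "unauthorized: authentication required"
        }
    }
}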
-// HandleErrorResponse returns the error parsed from the HTTP response for an
-// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
-// UnexpectedHTTPStatusError is returned for response codes outside of the
-// expected range.
-func HandleErrorResponse(resp *http.Response) error {
- if resp.StatusCode >= 400 && resp.StatusCode < 500 {
- // Check for OAuth errors within the `WWW-Authenticate` header first
- // See https://tools.ietf.org/html/rfc6750#section-3
- for _, c := range challenge.ResponseChallenges(resp) {
- if c.Scheme == "bearer" {
- var err errcode.Error
- // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
- switch c.Parameters["error"] {
- case "invalid_token":
- err.Code = errcode.ErrorCodeUnauthorized
- case "insufficient_scope":
- err.Code = errcode.ErrorCodeDenied
- default:
- continue
- }
- if description := c.Parameters["error_description"]; description != "" {
- err.Message = description
- } else {
- err.Message = err.Code.Message()
- }
-
- return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
- }
- }
- err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
- if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
- return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
- }
- return err
- }
- return &UnexpectedHTTPStatusError{Status: resp.Status}
-}
-
-// SuccessStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func SuccessStatus(status int) bool {
- return status >= 200 && status <= 399
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
deleted file mode 100644
index 1ebd0b183..000000000
--- a/vendor/github.com/docker/distribution/registry/client/repository.go
+++ /dev/null
@@ -1,853 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/api/v2"
- "github.com/docker/distribution/registry/client/transport"
- "github.com/docker/distribution/registry/storage/cache"
- "github.com/docker/distribution/registry/storage/cache/memory"
-)
-
-// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
-type Registry interface {
- Repositories(ctx context.Context, repos []string, last string) (n int, err error)
-}
-
-// checkHTTPRedirect is a callback that can manipulate redirected HTTP
-// requests. It is used to preserve Accept and Range headers.
-func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
- if len(via) >= 10 {
- return errors.New("stopped after 10 redirects")
- }
-
- if len(via) > 0 {
- for headerName, headerVals := range via[0].Header {
- if headerName != "Accept" && headerName != "Range" {
- continue
- }
- for _, val := range headerVals {
- // Don't add to redirected request if redirected
- // request already has a header with the same
- // name and value.
- hasValue := false
- for _, existingVal := range req.Header[headerName] {
- if existingVal == val {
- hasValue = true
- break
- }
- }
- if !hasValue {
- req.Header.Add(headerName, val)
- }
- }
- }
- }
-
- return nil
-}
-
-// NewRegistry creates a registry namespace which can be used to get a listing of repositories.
-func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
- ub, err := v2.NewURLBuilderFromString(baseURL, false)
- if err != nil {
- return nil, err
- }
-
- client := &http.Client{
- Transport: transport,
- Timeout: 1 * time.Minute,
- CheckRedirect: checkHTTPRedirect,
- }
-
- return &registry{
- client: client,
- ub: ub,
- context: ctx,
- }, nil
-}
-
-type registry struct {
- client *http.Client
- ub *v2.URLBuilder
- context context.Context
-}
-
-// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
-// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
-// are no more entries.
-func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
- var numFilled int
- var returnErr error
-
- values := buildCatalogValues(len(entries), last)
- u, err := r.ub.BuildCatalogURL(values)
- if err != nil {
- return 0, err
- }
-
- resp, err := r.client.Get(u)
- if err != nil {
- return 0, err
- }
- defer resp.Body.Close()
-
- if SuccessStatus(resp.StatusCode) {
- var ctlg struct {
- Repositories []string `json:"repositories"`
- }
- decoder := json.NewDecoder(resp.Body)
-
- if err := decoder.Decode(&ctlg); err != nil {
- return 0, err
- }
-
- for cnt := range ctlg.Repositories {
- entries[cnt] = ctlg.Repositories[cnt]
- }
- numFilled = len(ctlg.Repositories)
-
- link := resp.Header.Get("Link")
- if link == "" {
- returnErr = io.EOF
- }
- } else {
- return 0, HandleErrorResponse(resp)
- }
-
- return numFilled, returnErr
-}
-
-// NewRepository creates a new Repository for the given repository name and base URL.
-func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - u, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" {
- u = strings.Trim(strings.Split(link, ";")[0], "<>")
- } else {
- return tags, nil
- }
- } else {
- return tags, HandleErrorResponse(resp)
- }
- }
-}
-
-func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) {
- desc := distribution.Descriptor{}
- headers := response.Header
-
- ctHeader := headers.Get("Content-Type")
- if ctHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Type header")
- }
- desc.MediaType = ctHeader
-
- digestHeader := headers.Get("Docker-Content-Digest")
- if digestHeader == "" {
- bytes, err := ioutil.ReadAll(response.Body)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- return desc, nil
- }
-
- dgst, err := digest.ParseDigest(digestHeader)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Digest = dgst
-
- lengthHeader := headers.Get("Content-Length")
- if lengthHeader == "" {
- return distribution.Descriptor{}, errors.New("missing or empty Content-Length header")
- }
- length, err := strconv.ParseInt(lengthHeader, 10, 64)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- desc.Size = length
-
- return desc, nil
-
-}
-
-// Get issues a HEAD request for a Manifest against its named endpoint in order
-// to construct a descriptor for the tag. If the registry doesn't support HEADing
-// a manifest, fall back to GET.
-func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) {
- ref, err := reference.WithTag(t.name, tag)
- if err != nil {
- return distribution.Descriptor{}, err
- }
- u, err := t.ub.BuildManifestURL(ref)
- if err != nil {
- return distribution.Descriptor{}, err
- }
-
- newRequest := func(method string) (*http.Response, error) {
- req, err := http.NewRequest(method, u, nil)
- if err != nil {
- return nil, err
- }
-
- for _, t := range distribution.ManifestMediaTypes() {
- req.Header.Add("Accept", t)
- }
- resp, err := t.client.Do(req)
- return resp, err
- }
-
- resp, err := newRequest("HEAD")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- switch {
- case resp.StatusCode >= 200 && resp.StatusCode < 400:
- return descriptorFromResponse(resp)
- default:
- // if the response is an error - there will be no body to decode.
- // Issue a GET request:
- //   - for data from a server that does not handle HEAD
- //   - to get error details in case of a failure
- resp, err = newRequest("GET")
- if err != nil {
- return distribution.Descriptor{}, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode >= 200 && resp.StatusCode < 400 {
- return descriptorFromResponse(resp)
- }
- return distribution.Descriptor{}, HandleErrorResponse(resp)
- }
-}
-
-func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
- panic("not implemented")
-}
-
-func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
- panic("not implemented")
-}
-
-func (t *tags) Untag(ctx context.Context, tag string) error {
- panic("not implemented")
-}
-
-type manifests struct {
- name reference.Named
- ub *v2.URLBuilder
- client *http.Client
- etags map[string]string
-}
-
-func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
- ref, err := reference.WithDigest(ms.name, dgst)
- if err != nil {
- return false, err
- }
- u, err := ms.ub.BuildManifestURL(ref)
- if err != nil {
- return false, err
- }
-
- resp, err := ms.client.Head(u)
- if err != nil {
- return false, err
- }
-
- if SuccessStatus(resp.StatusCode) {
- return true, nil
- } else if resp.StatusCode == http.StatusNotFound {
- return false, nil
- }
- return false, HandleErrorResponse(resp)
-}
-
-// AddEtagToTag allows a client to supply an eTag to Get which will be
-// used for a conditional HTTP request. If the eTag matches, a nil manifest
-// and an ErrManifestNotModified error will be returned. etag is automatically
-// quoted when added to this map.
-func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
- return etagOption{tag, etag}
-}
-
-type etagOption struct{ tag, etag string }
-
-func (o etagOption) Apply(ms distribution.ManifestService) error {
- if ms, ok := ms.(*manifests); ok {
- ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
- return nil
- }
- return fmt.Errorf("etag option is a client-only option")
-}
-
-// ReturnContentDigest allows a client to set the content digest on
-// a successful request from the 'Docker-Content-Digest' header. The
-// returned digest represents the digest which the registry uses
-// to refer to the content and can be used to delete the content.
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - ) - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - } else if opt, ok := option.(contentDigestOption); ok { - contentDgst = opt.digest - } else { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.ParseDigest(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.New() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index e5ff09d75..000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,251 +0,0 @@ -package transport - 
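Before the reader internals, an end-to-end sketch of how the repository client above was typically consumed: paging the catalog, then listing tags. Import paths are the vendored ones this patch removes; the registry URL and repository name are illustrative, and details may vary with the distribution vintage.

package main

import (
    "fmt"
    "net/http"

    "github.com/docker/distribution/context"
    "github.com/docker/distribution/reference"
    "github.com/docker/distribution/registry/client"
)

func main() {
    ctx := context.Background()

    // Page through the catalog using the Repositories/io.EOF contract above.
    reg, err := client.NewRegistry(ctx, "https://registry.example.com", http.DefaultTransport)
    if err != nil {
        panic(err)
    }
    entries := make([]string, 50)
    n, err := reg.Repositories(ctx, entries, "")
    fmt.Println(entries[:n], err) // err == io.EOF once no Link header remains

    // List tags for one repository via the same client plumbing.
    named, err := reference.ParseNamed("registry.example.com/library/busybox")
    if err != nil {
        panic(err)
    }
    repo, err := client.NewRepository(ctx, named, "https://registry.example.com", http.DefaultTransport)
    if err != nil {
        panic(err)
    }
    tags, err := repo.Tags(ctx).All(ctx) // follows Link headers internally
    fmt.Println(tags, err)
}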
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "regexp"
- "strconv"
-)
-
-var (
- contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
-
- // ErrWrongCodeForByteRange is returned if the client sends a request
- // with a Range header but the server returns a 2xx or 3xx code other
- // than 206 Partial Content.
- ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
-)
-
-// ReadSeekCloser combines io.ReadSeeker with io.Closer.
-type ReadSeekCloser interface {
- io.ReadSeeker
- io.Closer
-}
-
-// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
-// request. When seeking and starting a read from a non-zero offset,
-// a "Range" header will be added which sets the offset.
-// TODO(dmcgowan): Move this into a separate utility package
-func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
- return &httpReadSeeker{
- client: client,
- url: url,
- errorHandler: errorHandler,
- }
-}
-
-type httpReadSeeker struct {
- client *http.Client
- url string
-
- // errorHandler creates an error from an unsuccessful HTTP response.
- // This allows the error to be created with the HTTP response body
- // without leaking the body through a returned error.
- errorHandler func(*http.Response) error
-
- size int64
-
- // rc is the remote read closer.
- rc io.ReadCloser
- // readerOffset tracks the offset as of the last read.
- readerOffset int64
- // seekOffset allows Seek to override the offset. Seek changes
- // seekOffset instead of changing readerOffset directly so that
- // connection resets can be delayed and possibly avoided if the
- // seek is undone (i.e. seeking to the end and then back to the
- // beginning).
- seekOffset int64
- err error
-}
-
-func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- // If we sought to a different position, we need to reset the
- // connection. This logic is here instead of Seek so that if
- // a seek is undone before the next read, the connection doesn't
- // need to be closed and reopened. A common example of this is
- // seeking to the end to determine the length, and then seeking
- // back to the original position.
- if hrs.readerOffset != hrs.seekOffset {
- hrs.reset()
- }
-
- hrs.readerOffset = hrs.seekOffset
-
- rd, err := hrs.reader()
- if err != nil {
- return 0, err
- }
-
- n, err = rd.Read(p)
- hrs.seekOffset += int64(n)
- hrs.readerOffset += int64(n)
-
- return n, err
-}
-
-func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
- if hrs.err != nil {
- return 0, hrs.err
- }
-
- lastReaderOffset := hrs.readerOffset
-
- if whence == os.SEEK_SET && hrs.rc == nil {
- // If no request has been made yet, and we are seeking to an
- // absolute position, set the read offset as well to avoid an
- // unnecessary request.
- hrs.readerOffset = offset
- }
-
- _, err := hrs.reader()
- if err != nil {
- hrs.readerOffset = lastReaderOffset
- return 0, err
- }
-
- newOffset := hrs.seekOffset
-
- switch whence {
- case os.SEEK_CUR:
- newOffset += offset
- case os.SEEK_END:
- if hrs.size < 0 {
- return 0, errors.New("content length not known")
- }
- newOffset = hrs.size + offset
- case os.SEEK_SET:
- newOffset = offset
- }
-
- if newOffset < 0 {
- err = errors.New("cannot seek to negative position")
- } else {
- hrs.seekOffset = newOffset
- }
-
- return hrs.seekOffset, err
-}
-
-func (hrs *httpReadSeeker) Close() error {
- if hrs.err != nil {
- return hrs.err
- }
-
- // close and release reader chain
- if hrs.rc != nil {
- hrs.rc.Close()
- }
-
- hrs.rc = nil
-
- hrs.err = errors.New("httpLayer: closed")
-
- return nil
-}
-
-func (hrs *httpReadSeeker) reset() {
- if hrs.err != nil {
- return
- }
- if hrs.rc != nil {
- hrs.rc.Close()
- hrs.rc = nil
- }
-}
-
-func (hrs *httpReadSeeker) reader() (io.Reader, error) {
- if hrs.err != nil {
- return nil, hrs.err
- }
-
- if hrs.rc != nil {
- return hrs.rc, nil
- }
-
- req, err := http.NewRequest("GET", hrs.url, nil)
- if err != nil {
- return nil, err
- }
-
- if hrs.readerOffset > 0 {
- // If we are at a different offset, issue a range request from there.
- req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
- // TODO: get context in here
- // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
- }
-
- req.Header.Add("Accept-Encoding", "identity")
- resp, err := hrs.client.Do(req)
- if err != nil {
- return nil, err
- }
-
- // Normally would use client.SuccessStatus, but that would be a cyclic
- // import
- if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
- if hrs.readerOffset > 0 {
- if resp.StatusCode != http.StatusPartialContent {
- return nil, ErrWrongCodeForByteRange
- }
-
- contentRange := resp.Header.Get("Content-Range")
- if contentRange == "" {
- return nil, errors.New("no Content-Range header found in HTTP 206 response")
- }
-
- submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
- if len(submatches) < 4 {
- return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
- }
-
- startByte, err := strconv.ParseUint(submatches[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
- }
-
- if startByte != uint64(hrs.readerOffset) {
- return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
- }
-
- endByte, err := strconv.ParseUint(submatches[2], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
- }
-
- if submatches[3] == "*" {
- hrs.size = -1
- } else {
- size, err := strconv.ParseUint(submatches[3], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
- }
-
- if endByte+1 != size {
- return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
- }
-
- hrs.size = int64(size)
- }
- } else if resp.StatusCode == http.StatusOK {
- hrs.size = resp.ContentLength
- } else {
- hrs.size = -1
- }
- hrs.rc = resp.Body
- } else {
- defer resp.Body.Close()
- if hrs.errorHandler != nil {
- return nil, hrs.errorHandler(resp)
- }
- return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
- }
-
- return hrs.rc, nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go
deleted file mode 100644
index 30e45fab0..000000000
--- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package transport
-
-import (
- "io"
- "net/http"
- "sync"
-)
-
-// RequestModifier represents an object which will do an in-place
-// modification of an HTTP request.
-type RequestModifier interface {
- ModifyRequest(*http.Request) error
-}
-
-type headerModifier http.Header
-
-// NewHeaderRequestModifier returns a new RequestModifier which will
-// add the given headers to a request.
-func NewHeaderRequestModifier(header http.Header) RequestModifier {
- return headerModifier(header)
-}
-
-func (h headerModifier) ModifyRequest(req *http.Request) error {
- for k, s := range http.Header(h) {
- req.Header[k] = append(req.Header[k], s...)
- }
-
- return nil
-}
-
-// NewTransport creates a new transport which will apply modifiers to
-// the request on a RoundTrip call.
-func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
- return &transport{
- Modifiers: modifiers,
- Base: base,
- }
-}
-
-// transport is an http.RoundTripper that makes HTTP requests after
-// copying and modifying the request
-type transport struct {
- Modifiers []RequestModifier
- Base http.RoundTripper
-
- mu sync.Mutex // guards modReq
- modReq map[*http.Request]*http.Request // original -> modified
-}
-
-// RoundTrip applies the configured request modifiers to a copy of the
-// request and then performs the request with the base RoundTripper.
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
- req2 := cloneRequest(req)
- for _, modifier := range t.Modifiers {
- if err := modifier.ModifyRequest(req2); err != nil {
- return nil, err
- }
- }
-
- t.setModReq(req, req2)
- res, err := t.base().RoundTrip(req2)
- if err != nil {
- t.setModReq(req, nil)
- return nil, err
- }
- res.Body = &onEOFReader{
- rc: res.Body,
- fn: func() { t.setModReq(req, nil) },
- }
- return res, nil
-}
-
-// CancelRequest cancels an in-flight request by closing its connection.
-func (t *transport) CancelRequest(req *http.Request) {
- type canceler interface {
- CancelRequest(*http.Request)
- }
- if cr, ok := t.base().(canceler); ok {
- t.mu.Lock()
- modReq := t.modReq[req]
- delete(t.modReq, req)
- t.mu.Unlock()
- cr.CancelRequest(modReq)
- }
-}
-
-func (t *transport) base() http.RoundTripper {
- if t.Base != nil {
- return t.Base
- }
- return http.DefaultTransport
-}
-
-func (t *transport) setModReq(orig, mod *http.Request) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.modReq == nil {
- t.modReq = make(map[*http.Request]*http.Request)
- }
- if mod == nil {
- delete(t.modReq, orig)
- } else {
- t.modReq[orig] = mod
- }
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header, len(r.Header))
- for k, s := range r.Header {
- r2.Header[k] = append([]string(nil), s...)
- }
-
- return r2
-}
-
-type onEOFReader struct {
- rc io.ReadCloser
- fn func()
-}
-
-func (r *onEOFReader) Read(p []byte) (n int, err error) {
- n, err = r.rc.Read(p)
- if err == io.EOF {
- r.runFunc()
- }
- return
-}
-
-func (r *onEOFReader) Close() error {
- err := r.rc.Close()
- r.runFunc()
- return err
-}
-
-func (r *onEOFReader) runFunc() {
- if fn := r.fn; fn != nil {
- fn()
- r.fn = nil
- }
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
deleted file mode 100644
index 10a390919..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package cache provides facilities to speed up access to the storage
-// backend.
-package cache
-
-import (
- "fmt"
-
- "github.com/docker/distribution"
-)
-
-// BlobDescriptorCacheProvider provides repository scoped
-// BlobDescriptorService cache instances and a global descriptor cache.
-type BlobDescriptorCacheProvider interface {
- distribution.BlobDescriptorService
-
- RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
-}
-
-// ValidateDescriptor provides a helper function to ensure that caches have
-// common criteria for admitting descriptors.
-func ValidateDescriptor(desc distribution.Descriptor) error {
- if err := desc.Digest.Validate(); err != nil {
- return err
- }
-
- if desc.Size < 0 {
- return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
- }
-
- if desc.MediaType == "" {
- return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
- }
-
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
deleted file mode 100644
index 94ca8a90c..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package cache
-
-import (
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
-
- "github.com/docker/distribution"
-)
-
-// Metrics is used to hold metric counters
-// related to the number of times a cache was
-// hit or missed.
-type Metrics struct {
- Requests uint64
- Hits uint64
- Misses uint64
-}
-
-// MetricsTracker represents a metric tracker
-// which simply counts the number of hits and misses.
-type MetricsTracker interface {
- Hit()
- Miss()
- Metrics() Metrics
-}
-
-type cachedBlobStatter struct {
- cache distribution.BlobDescriptorService
- backend distribution.BlobDescriptorService
- tracker MetricsTracker
-}
-
-// NewCachedBlobStatter creates a new statter which prefers a cache and
-// falls back to a backend.
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- }
-}
-
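The MetricsTracker interface above can be satisfied by a plain atomic counter; a hedged sketch of a tracker that NewCachedBlobStatterWithMetrics, just below, could accept (names are illustrative, not part of the vendored API):

package cachedemo

import (
    "sync/atomic"

    "github.com/docker/distribution/registry/storage/cache"
)

type countingTracker struct {
    hits, misses uint64
}

func (t *countingTracker) Hit()  { atomic.AddUint64(&t.hits, 1) }
func (t *countingTracker) Miss() { atomic.AddUint64(&t.misses, 1) }

func (t *countingTracker) Metrics() cache.Metrics {
    h := atomic.LoadUint64(&t.hits)
    m := atomic.LoadUint64(&t.misses)
    return cache.Metrics{Requests: h + m, Hits: h, Misses: m}
}

var _ cache.MetricsTracker = (*countingTracker)(nil) // compile-time interface check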
-// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and
-// falls back to a backend. Hits and misses will be sent to the tracker.
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter {
- return &cachedBlobStatter{
- cache: cache,
- backend: backend,
- tracker: tracker,
- }
-}
-
-func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
- desc, err := cbds.cache.Stat(ctx, dgst)
- if err != nil {
- if err != distribution.ErrBlobUnknown {
- context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err)
- }
-
- goto fallback
- }
-
- if cbds.tracker != nil {
- cbds.tracker.Hit()
- }
- return desc, nil
-fallback:
- if cbds.tracker != nil {
- cbds.tracker.Miss()
- }
- desc, err = cbds.backend.Stat(ctx, dgst)
- if err != nil {
- return desc, err
- }
-
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
- }
-
- return desc, err
-
-}
-
-func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
- err := cbds.cache.Clear(ctx, dgst)
- if err != nil {
- return err
- }
-
- err = cbds.backend.Clear(ctx, dgst)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
- if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
- context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err)
- }
- return nil
-}
diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
deleted file mode 100644
index cf125e187..000000000
--- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package memory
-
-import (
- "sync"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/context"
- "github.com/docker/distribution/digest"
- "github.com/docker/distribution/reference"
- "github.com/docker/distribution/registry/storage/cache"
-)
-
-type inMemoryBlobDescriptorCacheProvider struct {
- global *mapBlobDescriptorCache
- repositories map[string]*mapBlobDescriptorCache
- mu sync.RWMutex
-}
-
-// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for
-// storing blob descriptor data.
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/github.com/docker/engine-api/LICENSE b/vendor/github.com/docker/engine-api/LICENSE deleted file mode 100644 index c157bff96..000000000 --- a/vendor/github.com/docker/engine-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/docker/engine-api/client/transport/cancellable/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/github.com/docker/engine-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/docker/engine-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/flynn/go-shlex/COPYING b/vendor/github.com/flynn/go-shlex/COPYING deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/flynn/go-shlex/COPYING +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/flynn/go-shlex/shlex.go b/vendor/github.com/flynn/go-shlex/shlex.go deleted file mode 100644 index 7aeace801..000000000 --- a/vendor/github.com/flynn/go-shlex/shlex.go +++ /dev/null @@ -1,457 +0,0 @@ -/* -Copyright 2012 Google Inc. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package shlex - -/* -Package shlex implements a simple lexer which splits input in to tokens using -shell-style rules for quoting and commenting. 
-*/ -import ( - "bufio" - "errors" - "fmt" - "io" - "strings" -) - -/* -A TokenType is a top-level token; a word, space, comment, unknown. -*/ -type TokenType int - -/* -A RuneTokenType is the type of a UTF-8 character; a character, quote, space, escape. -*/ -type RuneTokenType int - -type lexerState int - -type Token struct { - tokenType TokenType - value string -} - -/* -Two tokens are equal if both their types and values are equal. A nil token can -never equal another token. -*/ -func (a *Token) Equal(b *Token) bool { - if a == nil || b == nil { - return false - } - if a.tokenType != b.tokenType { - return false - } - return a.value == b.value -} - -const ( - RUNE_CHAR string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-,/@$*()+=><:;&^%~|!?[]{}" - RUNE_SPACE string = " \t\r\n" - RUNE_ESCAPING_QUOTE string = "\"" - RUNE_NONESCAPING_QUOTE string = "'" - RUNE_ESCAPE = "\\" - RUNE_COMMENT = "#" - - RUNETOKEN_UNKNOWN RuneTokenType = 0 - RUNETOKEN_CHAR RuneTokenType = 1 - RUNETOKEN_SPACE RuneTokenType = 2 - RUNETOKEN_ESCAPING_QUOTE RuneTokenType = 3 - RUNETOKEN_NONESCAPING_QUOTE RuneTokenType = 4 - RUNETOKEN_ESCAPE RuneTokenType = 5 - RUNETOKEN_COMMENT RuneTokenType = 6 - RUNETOKEN_EOF RuneTokenType = 7 - - TOKEN_UNKNOWN TokenType = 0 - TOKEN_WORD TokenType = 1 - TOKEN_SPACE TokenType = 2 - TOKEN_COMMENT TokenType = 3 - - STATE_START lexerState = 0 - STATE_INWORD lexerState = 1 - STATE_ESCAPING lexerState = 2 - STATE_ESCAPING_QUOTED lexerState = 3 - STATE_QUOTED_ESCAPING lexerState = 4 - STATE_QUOTED lexerState = 5 - STATE_COMMENT lexerState = 6 - - INITIAL_TOKEN_CAPACITY int = 100 -) - -/* -A type for classifying characters. This allows for different sorts of -classifiers - those accepting extended non-ascii chars, or strict posix -compatibility, for example. -*/ -type TokenClassifier struct { - typeMap map[int32]RuneTokenType -} - -func addRuneClass(typeMap *map[int32]RuneTokenType, runes string, tokenType RuneTokenType) { - for _, rune := range runes { - (*typeMap)[int32(rune)] = tokenType - } -} - -/* -Create a new classifier for basic ASCII characters. -*/ -func NewDefaultClassifier() *TokenClassifier { - typeMap := map[int32]RuneTokenType{} - addRuneClass(&typeMap, RUNE_CHAR, RUNETOKEN_CHAR) - addRuneClass(&typeMap, RUNE_SPACE, RUNETOKEN_SPACE) - addRuneClass(&typeMap, RUNE_ESCAPING_QUOTE, RUNETOKEN_ESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_NONESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE) - addRuneClass(&typeMap, RUNE_ESCAPE, RUNETOKEN_ESCAPE) - addRuneClass(&typeMap, RUNE_COMMENT, RUNETOKEN_COMMENT) - return &TokenClassifier{ - typeMap: typeMap} -} - -func (classifier *TokenClassifier) ClassifyRune(rune int32) RuneTokenType { - return classifier.typeMap[rune] -} - -/* -A type for turning an input stream in to a sequence of strings. Whitespace and -comments are skipped. -*/ -type Lexer struct { - tokenizer *Tokenizer -} - -/* -Create a new lexer. -*/ -func NewLexer(r io.Reader) (*Lexer, error) { - - tokenizer, err := NewTokenizer(r) - if err != nil { - return nil, err - } - lexer := &Lexer{tokenizer: tokenizer} - return lexer, nil -} - -/* -Return the next word, and an error value. If there are no more words, the error -will be io.EOF. 
-*/ -func (l *Lexer) NextWord() (string, error) { - var token *Token - var err error - for { - token, err = l.tokenizer.NextToken() - if err != nil { - return "", err - } - switch token.tokenType { - case TOKEN_WORD: - { - return token.value, nil - } - case TOKEN_COMMENT: - { - // skip comments - } - default: - { - panic(fmt.Sprintf("Unknown token type: %v", token.tokenType)) - } - } - } - return "", io.EOF -} - -/* -A type for turning an input stream in to a sequence of typed tokens. -*/ -type Tokenizer struct { - input *bufio.Reader - classifier *TokenClassifier -} - -/* -Create a new tokenizer. -*/ -func NewTokenizer(r io.Reader) (*Tokenizer, error) { - input := bufio.NewReader(r) - classifier := NewDefaultClassifier() - tokenizer := &Tokenizer{ - input: input, - classifier: classifier} - return tokenizer, nil -} - -/* -Scan the stream for the next token. - -This uses an internal state machine. It will panic if it encounters a character -which it does not know how to handle. -*/ -func (t *Tokenizer) scanStream() (*Token, error) { - state := STATE_START - var tokenType TokenType - value := make([]int32, 0, INITIAL_TOKEN_CAPACITY) - var ( - nextRune int32 - nextRuneType RuneTokenType - err error - ) -SCAN: - for { - nextRune, _, err = t.input.ReadRune() - nextRuneType = t.classifier.ClassifyRune(nextRune) - if err != nil { - if err == io.EOF { - nextRuneType = RUNETOKEN_EOF - err = nil - } else { - return nil, err - } - } - switch state { - case STATE_START: // no runes read yet - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - return nil, io.EOF - } - case RUNETOKEN_CHAR: - { - tokenType = TOKEN_WORD - value = append(value, nextRune) - state = STATE_INWORD - } - case RUNETOKEN_SPACE: - { - } - case RUNETOKEN_ESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - tokenType = TOKEN_WORD - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - tokenType = TOKEN_WORD - state = STATE_ESCAPING - } - case RUNETOKEN_COMMENT: - { - tokenType = TOKEN_COMMENT - state = STATE_COMMENT - } - default: - { - return nil, errors.New(fmt.Sprintf("Unknown rune: %v", nextRune)) - } - } - } - case STATE_INWORD: // in a regular word - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - t.input.UnreadRune() - break SCAN - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_QUOTED_ESCAPING - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_QUOTED - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING: // the next rune after an escape character - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - state = STATE_INWORD - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_ESCAPING_QUOTED: // the next rune after an escape character, in double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found after escape character") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_ESCAPE, 
RUNETOKEN_COMMENT: - { - state = STATE_QUOTED_ESCAPING - value = append(value, nextRune) - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED_ESCAPING: // in escaping double quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_NONESCAPING_QUOTE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_ESCAPING_QUOTE: - { - state = STATE_INWORD - } - case RUNETOKEN_ESCAPE: - { - state = STATE_ESCAPING_QUOTED - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_QUOTED: // in non-escaping single quotes - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - err = errors.New("EOF found when expecting closing quote.") - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_SPACE, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT: - { - value = append(value, nextRune) - } - case RUNETOKEN_NONESCAPING_QUOTE: - { - state = STATE_INWORD - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - case STATE_COMMENT: - { - switch nextRuneType { - case RUNETOKEN_EOF: - { - break SCAN - } - case RUNETOKEN_CHAR, RUNETOKEN_UNKNOWN, RUNETOKEN_ESCAPING_QUOTE, RUNETOKEN_ESCAPE, RUNETOKEN_COMMENT, RUNETOKEN_NONESCAPING_QUOTE: - { - value = append(value, nextRune) - } - case RUNETOKEN_SPACE: - { - if nextRune == '\n' { - state = STATE_START - break SCAN - } else { - value = append(value, nextRune) - } - } - default: - { - return nil, errors.New(fmt.Sprintf("Uknown rune: %v", nextRune)) - } - } - } - default: - { - panic(fmt.Sprintf("Unexpected state: %v", state)) - } - } - } - token := &Token{ - tokenType: tokenType, - value: string(value)} - return token, err -} - -/* -Return the next token in the stream, and an error value. If there are no more -tokens available, the error value will be io.EOF. -*/ -func (t *Tokenizer) NextToken() (*Token, error) { - return t.scanStream() -} - -/* -Split a string in to a slice of strings, based upon shell-style rules for -quoting, escaping, and spaces. -*/ -func Split(s string) ([]string, error) { - l, err := NewLexer(strings.NewReader(s)) - if err != nil { - return nil, err - } - subStrings := []string{} - for { - word, err := l.NextWord() - if err != nil { - if err == io.EOF { - return subStrings, nil - } - return subStrings, err - } - subStrings = append(subStrings, word) - } - return subStrings, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/LICENSE b/vendor/github.com/hyperhq/hyper-api/LICENSE deleted file mode 100644 index c157bff96..000000000 --- a/vendor/github.com/hyperhq/hyper-api/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
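The remaining hunks delete the vendored Hyper.sh API client itself. For orientation, a minimal, hypothetical sketch of how a consumer would have driven this client, using only signatures visible in the deleted sources (NewEnvClient and CheckpointList); the container name and error handling are illustrative:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/hyperhq/hyper-api/client"
)

func main() {
	// NewEnvClient reads DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH,
	// DOCKER_TLS_VERIFY, ACCESSKEY, SECRETKEY, and HYPER_REGION from the
	// environment, per the deleted client.go.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// CheckpointList issues GET /containers/<id>/checkpoints, as implemented
	// in the deleted checkpoint_list.go. "my-container" is a placeholder.
	checkpoints, err := cli.CheckpointList(context.Background(), "my-container")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("checkpoints: %+v\n", checkpoints)
}
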
diff --git a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_create.go b/vendor/github.com/hyperhq/hyper-api/client/checkpoint_create.go deleted file mode 100644 index 47e8677ba..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client - -import ( - "context" - - "github.com/hyperhq/hyper-api/types" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_delete.go b/vendor/github.com/hyperhq/hyper-api/client/checkpoint_delete.go deleted file mode 100644 index 170c8f70b..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_delete.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error { - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_list.go b/vendor/github.com/hyperhq/hyper-api/client/checkpoint_list.go deleted file mode 100644 index d40ff52d8..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/checkpoint_list.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "encoding/json" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// CheckpointList returns the volumes configured in the docker host. -func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil) - if err != nil { - return checkpoints, err - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) - return checkpoints, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/client.go b/vendor/github.com/hyperhq/hyper-api/client/client.go deleted file mode 100644 index b7566e1e9..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/client.go +++ /dev/null @@ -1,153 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/docker/go-connections/tlsconfig" - "github.com/hyperhq/hyper-api/client/transport" -) - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // transport is the interface to send request with, it implements transport.Client. - transport transport.Client - // Cloud's Config - accessKey string - secretKey string - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string - - // region - region string -} - -// NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. 
-// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the tls certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -func NewEnvClient() (*Client, error) { - var client *http.Client - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - } - } - - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost - } - accessKey := os.Getenv("ACCESSKEY") - secretKey := os.Getenv("SECRETKEY") - region := os.Getenv("HYPER_REGION") - return NewClient(host, os.Getenv("DOCKER_API_VERSION"), client, nil, accessKey, secretKey, region) -} - -// NewClient initializes a new API client for the given host and API version. -// It won't send any version information if the version number is empty. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string, ak, sk, region string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) - if err != nil { - return nil, err - } - - transport, err := transport.NewTransportWithHTTP(proto, addr, client) - if err != nil { - return nil, err - } - - return &Client{ - proto: proto, - addr: addr, - basePath: basePath, - transport: transport, - accessKey: ak, - secretKey: sk, - version: version, - customHTTPHeaders: httpHeaders, - region: region, - }, nil -} - -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { - var apiPath string - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) - } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) - } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() -} - -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// UpdateClientVersion updates the version string associated with this -// instance of the Client. -func (cli *Client) UpdateClientVersion(v string) { - cli.version = v -} - -// ParseHost verifies that the given host strings is valid. 
-func ParseHost(host string) (string, string, string, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return "", "", "", err - } - addr = parsed.Host - basePath = parsed.Path - } - return proto, addr, basePath, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/client_darwin.go b/vendor/github.com/hyperhq/hyper-api/client/client_darwin.go deleted file mode 100644 index 4b47a178c..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/client_darwin.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "tcp://127.0.0.1:2375" diff --git a/vendor/github.com/hyperhq/hyper-api/client/client_unix.go b/vendor/github.com/hyperhq/hyper-api/client/client_unix.go deleted file mode 100644 index 572c5f87a..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/client_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux freebsd solaris openbsd - -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/hyperhq/hyper-api/client/client_windows.go b/vendor/github.com/hyperhq/hyper-api/client/client_windows.go deleted file mode 100644 index 07c0c7a77..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/client_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package client - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/hyperhq/hyper-api/client/compose.go b/vendor/github.com/hyperhq/hyper-api/client/compose.go deleted file mode 100644 index 26b4e47e5..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/compose.go +++ /dev/null @@ -1,149 +0,0 @@ -package client - -import ( - "context" - "fmt" - "io" - "net/url" - "strings" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/libcompose/config" -) - -type composeConfigWrapper struct { - ServiceConfigs *config.ServiceConfigs `json:"ServiceConfigs"` - VolumeConfigs map[string]*config.VolumeConfig `json:"VolumeConfigs"` - NetworkConfigs map[string]*config.NetworkConfig `json:"NetworkConfigs"` - AuthConfigs map[string]types.AuthConfig `json:"auths"` -} - -func (cli *Client) ComposeUp(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - if forcerecreate { - query.Set("forcerecreate", "true") - } - if norecreate { - query.Set("norecreate", "true") - } - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - body := composeConfigWrapper{ - ServiceConfigs: c, - VolumeConfigs: vc, - NetworkConfigs: nc, - AuthConfigs: auth, - } - resp, err := cli.post(context.Background(), "/compose/up", query, body, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeDown(project string, services []string, rmi string, vol, rmorphans bool) (io.ReadCloser, error) { - - query := url.Values{} - query.Set("project", project) - if rmi != "" 
{ - query.Set("rmi", rmi) - } - if vol { - query.Set("rmvol", "true") - } - if rmorphans { - query.Set("rmorphans", "true") - } - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - resp, err := cli.post(context.Background(), "/compose/down", query, nil, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeCreate(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, auth map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - if forcerecreate { - query.Set("forcerecreate", "true") - } - if norecreate { - query.Set("norecreate", "true") - } - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - body := composeConfigWrapper{ - ServiceConfigs: c, - VolumeConfigs: vc, - NetworkConfigs: nc, - AuthConfigs: auth, - } - resp, err := cli.post(context.Background(), "/compose/create", query, body, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeRm(project string, services []string, rmVol bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - if rmVol { - query.Set("rmvol", "true") - } - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - resp, err := cli.post(context.Background(), "/compose/rm", query, nil, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeStart(project string, services []string) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - resp, err := cli.post(context.Background(), "/compose/start", query, nil, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeStop(project string, services []string, timeout int) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - query.Set("seconds", fmt.Sprintf("%d", timeout)) - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - resp, err := cli.post(context.Background(), "/compose/stop", query, nil, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) ComposeKill(project string, services []string, signal string) (io.ReadCloser, error) { - query := url.Values{} - query.Set("project", project) - query.Set("signal", signal) - if len(services) > 0 { - query.Set("services", strings.Join(services, "}{")) - } - resp, err := cli.post(context.Background(), "/compose/kill", query, nil, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_attach.go b/vendor/github.com/hyperhq/hyper-api/client/container_attach.go deleted file mode 100644 index 7f90bd61c..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_attach.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "context" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_commit.go b/vendor/github.com/hyperhq/hyper-api/client/container_commit.go deleted file mode 100644 index 52b2e9003..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_commit.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - distreference "github.com/docker/distribution/reference" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/reference" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { - var repository, tag string - if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) - if err != nil { - return types.ContainerCommitResponse{}, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") - } - - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if options.Pause != true { - query.Set("pause", "0") - } - - var response types.ContainerCommitResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_copy.go b/vendor/github.com/hyperhq/hyper-api/client/container_copy.go deleted file mode 100644 index a031bb9b5..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_copy.go +++ /dev/null @@ -1,96 +0,0 @@ -package client - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/hyperhq/hyper-api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, err - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return err - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_create.go b/vendor/github.com/hyperhq/hyper-api/client/container_create.go deleted file mode 100644 index 28d8a0fff..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_create.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/network" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based in the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { - var response types.ContainerCreateResponse - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_diff.go b/vendor/github.com/hyperhq/hyper-api/client/container_diff.go deleted file mode 100644 index a0b8a8eb3..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ContainerDiff shows differences in a container filesystem since it was started. 
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) {
-	var changes []types.ContainerChange
-
-	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil)
-	if err != nil {
-		return changes, err
-	}
-
-	err = json.NewDecoder(serverResp.body).Decode(&changes)
-	ensureReaderClosed(serverResp)
-	return changes, err
-}
diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_exec.go b/vendor/github.com/hyperhq/hyper-api/client/container_exec.go
deleted file mode 100644
index 0e1ff9408..000000000
--- a/vendor/github.com/hyperhq/hyper-api/client/container_exec.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package client
-
-import (
-	"encoding/json"
-
-	"context"
-	"github.com/hyperhq/hyper-api/types"
-)
-
-// ContainerExecCreate creates a new exec configuration to run an exec process.
-func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) {
-	var response types.ContainerExecCreateResponse
-	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
-	if err != nil {
-		return response, err
-	}
-	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
-	return response, err
-}
-
-// ContainerExecStart starts an exec process already created in the docker host.
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
-	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
-	ensureReaderClosed(resp)
-	return err
-}
-
-// ContainerExecAttach attaches a connection to an exec process in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
-	headers := map[string][]string{"Content-Type": {"application/json"}}
-	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
-}
-
-// ContainerExecInspect returns information about a specific exec process on the docker host.
-func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
-	var response types.ContainerExecInspect
-	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
-	if err != nil {
-		return response, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
-	return response, err
-}
diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_export.go b/vendor/github.com/hyperhq/hyper-api/client/container_export.go
deleted file mode 100644
index 22cb17229..000000000
--- a/vendor/github.com/hyperhq/hyper-api/client/container_export.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package client
-
-import (
-	"io"
-	"net/url"
-
-	"context"
-)
-
-// ContainerExport retrieves the raw contents of a container
-// and returns them as an io.ReadCloser. It's up to the caller
-// to close the stream.
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_inspect.go b/vendor/github.com/hyperhq/hyper-api/client/container_inspect.go deleted file mode 100644 index c1429b62d..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_inspect.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and it's raw representation. -func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_kill.go b/vendor/github.com/hyperhq/hyper-api/client/container_kill.go deleted file mode 100644 index 83dc9317e..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_kill.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_list.go b/vendor/github.com/hyperhq/hyper-api/client/container_list.go deleted file mode 100644 index c553a2f44..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strconv" - - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_logs.go b/vendor/github.com/hyperhq/hyper-api/client/container_logs.go deleted file mode 100644 index 0730b7c8c..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_logs.go +++ /dev/null @@ -1,51 +0,0 @@ -package client - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/hyperhq/hyper-api/types" - timetypes "github.com/hyperhq/hyper-api/types/time" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_pause.go b/vendor/github.com/hyperhq/hyper-api/client/container_pause.go deleted file mode 100644 index 5b2f46c38..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_remove.go b/vendor/github.com/hyperhq/hyper-api/client/container_remove.go deleted file mode 100644 index c5f527624..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_remove.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) ([]string, error) { - var warnings []string - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+container, query, nil) - if err == nil { - json.NewDecoder(resp.body).Decode(&warnings) - } - ensureReaderClosed(resp) - return warnings, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_rename.go b/vendor/github.com/hyperhq/hyper-api/client/container_rename.go deleted file mode 100644 index ddf534844..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_rename.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -import ( - "net/url" - - "context" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_resize.go b/vendor/github.com/hyperhq/hyper-api/client/container_resize.go deleted file mode 100644 index aa03e6984..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error { - query := url.Values{} - query.Set("h", strconv.Itoa(height)) - query.Set("w", strconv.Itoa(width)) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_restart.go b/vendor/github.com/hyperhq/hyper-api/client/container_restart.go deleted file mode 100644 index 0ed61a8f7..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_restart.go +++ /dev/null @@ -1,19 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "context" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon to wait for the container to be up again for -// a specific amount of time, given the timeout. 
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_start.go b/vendor/github.com/hyperhq/hyper-api/client/container_start.go deleted file mode 100644 index 7e6383fa2..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_start.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "net/url" - - "context" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, checkpointID string) error { - query := url.Values{} - query.Set("checkpoint", checkpointID) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_stats.go b/vendor/github.com/hyperhq/hyper-api/client/container_stats.go deleted file mode 100644 index 07177a913..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_stats.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "context" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return nil, err - } - return resp.body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_stop.go b/vendor/github.com/hyperhq/hyper-api/client/container_stop.go deleted file mode 100644 index 8b9a966d5..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_stop.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "context" -) - -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout int) error { - query := url.Values{} - query.Set("t", strconv.Itoa(timeout)) - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_top.go b/vendor/github.com/hyperhq/hyper-api/client/container_top.go deleted file mode 100644 index 9fe1e3800..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ContainerTop shows process information from within a container. 
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_unpause.go b/vendor/github.com/hyperhq/hyper-api/client/container_unpause.go deleted file mode 100644 index 6eeec9c24..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_update.go b/vendor/github.com/hyperhq/hyper-api/client/container_update.go deleted file mode 100644 index b92b4d819..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_update.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig interface{}) error { - resp, err := cli.put(ctx, "/containers/"+containerID, nil, updateConfig, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/container_wait.go b/vendor/github.com/hyperhq/hyper-api/client/container_wait.go deleted file mode 100644 index b0469f0d0..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/container_wait.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - - "context" - - "github.com/hyperhq/hyper-api/types" -) - -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res types.ContainerWaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err - } - - return res.StatusCode, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/cron.go b/vendor/github.com/hyperhq/hyper-api/client/cron.go deleted file mode 100644 index 26c1b5c9a..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/cron.go +++ /dev/null @@ -1,106 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// CronCreate creates a cron in the Hyper_. 
-func (cli *Client) CronCreate(ctx context.Context, name string, sv types.Cron) (types.Cron, error) {
-	var cron types.Cron
-	var r = url.Values{}
-	r.Set("name", name)
-	resp, err := cli.post(ctx, "/crons/create", r, sv, nil)
-	if err != nil {
-		return cron, err
-	}
-	err = json.NewDecoder(resp.body).Decode(&cron)
-	ensureReaderClosed(resp)
-	return cron, err
-}
-
-// CronDelete removes a cron from the Hyper_.
-func (cli *Client) CronDelete(ctx context.Context, id string) error {
-	v := url.Values{}
-	resp, err := cli.delete(ctx, "/crons/"+id, v, nil)
-	ensureReaderClosed(resp)
-	return err
-}
-
-// CronList returns the crons configured in the docker host.
-func (cli *Client) CronList(ctx context.Context, opts types.CronListOptions) ([]types.Cron, error) {
-	var crons = []types.Cron{}
-	query := url.Values{}
-
-	if opts.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParamWithVersion(cli.version, opts.Filters)
-		if err != nil {
-			return crons, err
-		}
-		query.Set("filters", filterJSON)
-	}
-	resp, err := cli.get(ctx, "/crons", query, nil)
-	if err != nil {
-		return crons, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&crons)
-	ensureReaderClosed(resp)
-	return crons, err
-}
-
-// CronInspect returns the information about a specific cron in the docker host.
-func (cli *Client) CronInspect(ctx context.Context, cronID string) (types.Cron, error) {
-	cron, _, err := cli.CronInspectWithRaw(ctx, cronID)
-	return cron, err
-}
-
-// CronInspectWithRaw returns the information about a specific cron in the docker host and its raw representation.
-func (cli *Client) CronInspectWithRaw(ctx context.Context, cronID string) (types.Cron, []byte, error) {
-	var cron types.Cron
-	resp, err := cli.get(ctx, "/crons/"+cronID, nil, nil)
-	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return cron, nil, cronNotFoundError{cronID}
-		}
-		return cron, nil, err
-	}
-	defer ensureReaderClosed(resp)
-
-	body, err := ioutil.ReadAll(resp.body)
-	if err != nil {
-		return cron, nil, err
-	}
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&cron)
-	return cron, body, err
-}
-
-// CronHistory returns the execution history of a cron, optionally bounded by since and tail.
-func (cli *Client) CronHistory(ctx context.Context, id, since, tail string) ([]types.Event, error) {
-	var (
-		es = []types.Event{}
-		v  = url.Values{}
-	)
-	if since != "" {
-		v.Set("since", since)
-	}
-	if tail != "" {
-		v.Set("tail", tail)
-	}
-	resp, err := cli.get(ctx, "/crons/"+id+"/history", v, nil)
-	if err != nil {
-		return nil, err
-	}
-	defer ensureReaderClosed(resp)
-
-	err = json.NewDecoder(resp.body).Decode(&es)
-	return es, err
-}
diff --git a/vendor/github.com/hyperhq/hyper-api/client/errors.go b/vendor/github.com/hyperhq/hyper-api/client/errors.go
deleted file mode 100644
index cc6d0571a..000000000
--- a/vendor/github.com/hyperhq/hyper-api/client/errors.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package client
-
-import (
-	"errors"
-	"fmt"
-)
-
-// ErrConnectionFailed is returned when the connection between the client and the server fails.
-var ErrConnectionFailed = errors.New("Cannot connect to the Hyper.sh server.")
-
-// imageNotFoundError implements an error returned when an image is not in the docker host.
-type imageNotFoundError struct {
-	imageID string
-}
-
-// Error returns a string representation of an imageNotFoundError
-func (i imageNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such image: %s", i.imageID)
-}
-
-// IsErrImageNotFound returns true if the error is caused
-// when an image is not found in the docker host.
-func IsErrImageNotFound(err error) bool {
-	_, ok := err.(imageNotFoundError)
-	return ok
-}
-
-// containerNotFoundError implements an error returned when a container is not in the docker host.
-type containerNotFoundError struct {
-	containerID string
-}
-
-// Error returns a string representation of a containerNotFoundError
-func (e containerNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such container: %s", e.containerID)
-}
-
-// IsErrContainerNotFound returns true if the error is caused
-// when a container is not found in the docker host.
-func IsErrContainerNotFound(err error) bool {
-	_, ok := err.(containerNotFoundError)
-	return ok
-}
-
-// networkNotFoundError implements an error returned when a network is not in the docker host.
-type networkNotFoundError struct {
-	networkID string
-}
-
-// Error returns a string representation of a networkNotFoundError
-func (e networkNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such network: %s", e.networkID)
-}
-
-// IsErrNetworkNotFound returns true if the error is caused
-// when a network is not found in the docker host.
-func IsErrNetworkNotFound(err error) bool {
-	_, ok := err.(networkNotFoundError)
-	return ok
-}
-
-// snapshotNotFoundError implements an error returned when a snapshot is not in the docker host.
-type snapshotNotFoundError struct {
-	snapshotID string
-}
-
-// Error returns a string representation of a snapshotNotFoundError
-func (e snapshotNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such snapshot: %s", e.snapshotID)
-}
-
-// volumeNotFoundError implements an error returned when a volume is not in the docker host.
-type volumeNotFoundError struct {
-	volumeID string
-}
-
-// Error returns a string representation of a volumeNotFoundError
-func (e volumeNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
-}
-
-// IsErrVolumeNotFound returns true if the error is caused
-// when a volume is not found in the docker host.
-func IsErrVolumeNotFound(err error) bool {
-	_, ok := err.(volumeNotFoundError)
-	return ok
-}
-
-// serviceNotFoundError implements an error returned when a service is not in the docker host.
-type serviceNotFoundError struct {
-	serviceID string
-}
-
-// Error returns a string representation of a serviceNotFoundError
-func (e serviceNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such service: %s", e.serviceID)
-}
-
-// IsErrServiceNotFound returns true if the error is caused
-// when a service is not found in the docker host.
-func IsErrServiceNotFound(err error) bool {
-	_, ok := err.(serviceNotFoundError)
-	return ok
-}
-
-// cronNotFoundError implements an error returned when a cron is not in the docker host.
-type cronNotFoundError struct {
-	cronID string
-}
-
-// Error returns a string representation of a cronNotFoundError
-func (e cronNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such cron job: %s", e.cronID)
-}
-
-// IsErrCronNotFound returns true if the error is caused
-// when a cron is not found in the docker host.
-func IsErrCronNotFound(err error) bool {
-	_, ok := err.(cronNotFoundError)
-	return ok
-}
-
-// funcNotFoundError implements an error returned when a func is not in the docker host.
-type funcNotFoundError struct {
-	name string
-}
-
-// Error returns a string representation of a funcNotFoundError
-func (e funcNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such function: %s", e.name)
-}
-
-// funcCallNotFoundError implements an error returned when a func call is not in the docker host.
-type funcCallNotFoundError struct {
-	id string
-}
-
-// Error returns a string representation of a funcCallNotFoundError
func (e funcCallNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such call id: %s", e.id)
-}
-
-// IsErrFuncNotFound returns true if the error is caused
-// when a func is not found in the docker host.
-func IsErrFuncNotFound(err error) bool {
-	_, ok := err.(funcNotFoundError)
-	return ok
-}
-
-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct {
-	cause error
-}
-
-// Error returns a string representation of an unauthorizedError
-func (u unauthorizedError) Error() string {
-	return u.cause.Error()
-}
-
-// IsErrUnauthorized returns true if the error is caused
-// when remote registry authentication fails.
-func IsErrUnauthorized(err error) bool {
-	_, ok := err.(unauthorizedError)
-	return ok
-}
diff --git a/vendor/github.com/hyperhq/hyper-api/client/events.go b/vendor/github.com/hyperhq/hyper-api/client/events.go
deleted file mode 100644
index 3fbf84ced..000000000
--- a/vendor/github.com/hyperhq/hyper-api/client/events.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package client
-
-import (
-	"io"
-	"net/url"
-	"time"
-
-	"context"
-
-	"github.com/hyperhq/hyper-api/types"
-	"github.com/hyperhq/hyper-api/types/filters"
-	timetypes "github.com/hyperhq/hyper-api/types/time"
-)
-
-// Events returns a stream of events in the daemon in a ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - serverResponse, err := cli.get(ctx, "/events", query, nil) - if err != nil { - return nil, err - } - return serverResponse.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/fip.go b/vendor/github.com/hyperhq/hyper-api/client/fip.go deleted file mode 100644 index 04c4b43a5..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/fip.go +++ /dev/null @@ -1,90 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -func (cli *Client) FipAllocate(ctx context.Context, count string) ([]string, error) { - var result []string - var v = url.Values{} - v.Set("count", count) - serverResp, err := cli.post(ctx, "/fips/allocate", v, nil, nil) - if err != nil { - return result, err - } - - json.NewDecoder(serverResp.body).Decode(&result) - ensureReaderClosed(serverResp) - return result, err -} - -func (cli *Client) FipRelease(ctx context.Context, ip string) error { - var v = url.Values{} - v.Set("ip", ip) - _, err := cli.post(ctx, "/fips/release", v, nil, nil) - if err != nil { - return err - } - return nil -} - -func (cli *Client) FipAttach(ctx context.Context, ip, container string) error { - var v = url.Values{} - v.Set("ip", ip) - v.Set("container", container) - _, err := cli.post(ctx, "/fips/attach", v, nil, nil) - if err != nil { - return err - } - return nil -} - -func (cli *Client) FipDetach(ctx context.Context, container string) (string, error) { - var result string - var v = url.Values{} - v.Set("container", container) - resp, err := cli.post(ctx, "/fips/detach", v, nil, nil) - if err != nil { - return "", err - } - json.NewDecoder(resp.body).Decode(&result) - ensureReaderClosed(resp) - return result, nil -} - -func (cli *Client) FipList(ctx context.Context, options types.NetworkListOptions) ([]map[string]string, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var fips []map[string]string - resp, err := cli.get(ctx, "/fips", query, nil) - if err != nil { - return fips, err - } - err = json.NewDecoder(resp.body).Decode(&fips) - ensureReaderClosed(resp) - return fips, err -} - -func (cli *Client) FipName(ctx context.Context, ip, name string) error { - var v = url.Values{} - v.Set("ip", ip) - v.Set("name", name) - resp, err := cli.post(ctx, "/fips/name", v, nil, nil) - if err != nil { - return err - } - ensureReaderClosed(resp) - return nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/func.go b/vendor/github.com/hyperhq/hyper-api/client/func.go deleted file mode 100644 index 5bd0d3ab3..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/func.go +++ /dev/null @@ -1,283 +0,0 @@ -package client - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" 
- "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path" - "strconv" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -func newFuncEndpointRequest(region, method, subpath string, query url.Values, body io.Reader) (*http.Request, error) { - endpoint := os.Getenv("HYPER_FUNC_ENDPOINT") - if endpoint == "" { - endpoint = region + ".hyperfunc.io" - } - apiURL, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - apiURL.Scheme = "https" - apiURL.Path = path.Join(apiURL.Path, subpath) - queryStr := query.Encode() - if queryStr != "" { - apiURL.RawQuery = queryStr - } - req, err := http.NewRequest(method, apiURL.String(), body) - if err != nil { - return nil, err - } - return req, nil -} - -func funcEndpointRequestHijack(req *http.Request) (net.Conn, error) { - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - conn, err := tls.Dial("tcp", req.URL.Host+":443", &tls.Config{}) - if err != nil { - return nil, err - } - clientConn := httputil.NewClientConn(conn, nil) - resp, err := clientConn.Do(req) - if err != nil { - return nil, err - } - if resp.StatusCode != 101 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("Error response from server: %s", bytes.TrimSpace(body)) - } - respConn, _ := clientConn.Hijack() - return respConn, nil -} - -func funcEndpointRequest(req *http.Request) (*http.Response, error) { - client := &http.Client{Transport: &http.Transport{ - TLSClientConfig: &tls.Config{}, - }} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - status := resp.StatusCode - if status < 200 || status >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return nil, fmt.Errorf("Error response from server: %s", bytes.TrimSpace(body)) - } - return resp, nil -} - -func (cli *Client) FuncCreate(ctx context.Context, opts types.Func) (types.Func, error) { - var fn types.Func - _, _, err := cli.ImageInspectWithRaw(context.Background(), opts.Config.Image, false) - if err != nil { - return fn, err - } - resp, err := cli.post(ctx, "/funcs/create", nil, opts, nil) - if err != nil { - return fn, err - } - err = json.NewDecoder(resp.body).Decode(&fn) - ensureReaderClosed(resp) - return fn, err -} - -func (cli *Client) FuncUpdate(ctx context.Context, name string, opts types.Func) (types.Func, error) { - var fn types.Func - resp, err := cli.put(ctx, "/funcs/"+name, nil, opts, nil) - if err != nil { - return fn, err - } - err = json.NewDecoder(resp.body).Decode(&fn) - ensureReaderClosed(resp) - return fn, err -} - -func (cli *Client) FuncDelete(ctx context.Context, name string) error { - resp, err := cli.delete(ctx, "/funcs/"+name, nil, nil) - ensureReaderClosed(resp) - return err -} - -func (cli *Client) FuncList(ctx context.Context, opts types.FuncListOptions) ([]types.Func, error) { - var fns = []types.Func{} - query := url.Values{} - - if opts.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, opts.Filters) - if err != nil { - return fns, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/funcs", query, nil) - if err != nil { - return fns, err - } - - err = json.NewDecoder(resp.body).Decode(&fns) - ensureReaderClosed(resp) - return fns, err -} - -func (cli *Client) FuncInspect(ctx context.Context, name string) (types.Func, error) { - fn, _, err := cli.FuncInspectWithRaw(ctx, name) - return fn, err -} - -func (cli 
*Client) FuncInspectWithRaw(ctx context.Context, name string) (types.Func, []byte, error) { - var fn types.Func - resp, err := cli.get(ctx, "/funcs/"+name, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return fn, nil, funcNotFoundError{name} - } - return fn, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return fn, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&fn) - return fn, body, err -} - -func (cli *Client) FuncInspectWithCallId(ctx context.Context, id string) (*types.Func, error) { - var fn types.Func - resp, err := cli.get(ctx, "/funcs/call/"+id, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, funcCallNotFoundError{id} - } - return nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&fn) - return &fn, err -} - -func (cli *Client) FuncCall(ctx context.Context, region, name string, stdin io.Reader, sync bool) (io.ReadCloser, error) { - fn, _, err := cli.FuncInspectWithRaw(ctx, name) - if err != nil { - return nil, err - } - subpath := "" - if sync { - subpath += "/sync" - } - req, err := newFuncEndpointRequest(region, "POST", path.Join("call", name, fn.UUID, subpath), nil, stdin) - if err != nil { - return nil, err - } - resp, err := funcEndpointRequest(req) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) FuncGet(ctx context.Context, region, callId string, wait bool) (io.ReadCloser, error) { - fn, err := cli.FuncInspectWithCallId(ctx, callId) - if err != nil { - return nil, err - } - subpath := callId - if wait { - subpath += "/wait" - } - req, err := newFuncEndpointRequest(region, "GET", path.Join("output", fn.Name, fn.UUID, subpath), nil, nil) - if err != nil { - return nil, err - } - resp, err := funcEndpointRequest(req) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) FuncLogs(ctx context.Context, region, name, callId string, follow bool, tail string) (io.ReadCloser, error) { - fn, _, err := cli.FuncInspectWithRaw(ctx, name) - if err != nil { - return nil, err - } - query := url.Values{} - if callId != "" { - query.Set("callid", callId) - } - if follow { - query.Add("follow", strconv.FormatBool(follow)) - } - if tail != "" { - query.Add("tail", tail) - } - req, err := newFuncEndpointRequest(region, "GET", path.Join("logs", name, fn.UUID, ""), query, nil) - if err != nil { - return nil, err - } - if follow { - conn, err := funcEndpointRequestHijack(req) - if err != nil { - return nil, err - } - return conn.(io.ReadCloser), nil - } - resp, err := funcEndpointRequest(req) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (cli *Client) FuncStatus(ctx context.Context, region, name string) (*types.FuncStatusResponse, error) { - fn, _, err := cli.FuncInspectWithRaw(ctx, name) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("list", strconv.FormatBool(false)) - req, err := newFuncEndpointRequest(region, "GET", path.Join("status", name, fn.UUID), query, nil) - if err != nil { - return nil, err - } - - resp, err := funcEndpointRequest(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var ret types.FuncStatusResponse - err = json.NewDecoder(resp.Body).Decode(&ret) - if err != nil { - return nil, err - } - return &ret, nil -} diff 
--git a/vendor/github.com/hyperhq/hyper-api/client/hijack.go b/vendor/github.com/hyperhq/hyper-api/client/hijack.go deleted file mode 100644 index d94fd8044..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/hijack.go +++ /dev/null @@ -1,176 +0,0 @@ -package client - -import ( - "context" - "crypto/tls" - "errors" - "net" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/hyperhq/hyper-api/signature" - "github.com/hyperhq/hyper-api/types" -) - -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - req.URL.Host = cli.addr - - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - req = signature.Sign4(cli.accessKey, cli.secretKey, req, cli.region) - conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) - - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, ErrConnectionFailed - } - return types.HijackedResponse{}, err - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br, Resp: resp}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. 
- timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_build.go b/vendor/github.com/hyperhq/hyper-api/client/image_build.go deleted file mode 100644 index 1107bcf33..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_build.go +++ /dev/null @@ -1,119 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - - "context" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/container" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to -// close it. 
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - return query, nil -} - -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_create.go b/vendor/github.com/hyperhq/hyper-api/client/image_create.go deleted file mode 100644 index d19debdfa..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_create.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "context" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/reference" -) - -// ImageCreate creates a new image based in the parent options. -// It returns the JSON content in the response body. 
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_history.go b/vendor/github.com/hyperhq/hyper-api/client/image_history.go deleted file mode 100644 index 5135d20b0..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_import.go b/vendor/github.com/hyperhq/hyper-api/client/image_import.go deleted file mode 100644 index be820a09b..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_import.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "context" - - "github.com/docker/distribution/reference" - "github.com/hyperhq/hyper-api/types" -) - -// ImageImport creates a new image based in the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_inspect.go b/vendor/github.com/hyperhq/hyper-api/client/image_inspect.go deleted file mode 100644 index d12aff8d0..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation. 
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_list.go b/vendor/github.com/hyperhq/hyper-api/client/image_list.go deleted file mode 100644 index d659194ac..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_list.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { - var images []types.Image - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.MatchName != "" { - // FIXME rename this parameter, to not be confused with the filters flag - query.Set("filter", options.MatchName) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_load.go b/vendor/github.com/hyperhq/hyper-api/client/image_load.go deleted file mode 100644 index 85fc2b684..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_load.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser returned by -// this function. 
-func (cli *Client) ImageLoad(ctx context.Context, input interface{}) (*types.ImageLoadResponse, error) { - resp, err := cli.post(ctx, "/images/load", url.Values{}, input, nil) - if err != nil { - return nil, err - } - return &types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_load_local.go b/vendor/github.com/hyperhq/hyper-api/client/image_load_local.go deleted file mode 100644 index a0dcc914e..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_load_local.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -func (cli *Client) ImageSaveTarFromDaemon(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - tr := &http.Transport{ - Dial: func(proto, addr string) (conn net.Conn, err error) { - return net.Dial("unix", "/var/run/docker.sock") - }, - } - client := &http.Client{Transport: tr} - resp, err := client.Get("http://d/images/get?" + query.Encode()) - if err != nil { - return nil, err - } - if resp.StatusCode != 200 { - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - var dm struct { - Message string `json:"message"` - } - errHead := "Error from local docker daemon: " - if err := json.Unmarshal(data, &dm); err != nil { - return nil, errors.New(errHead + string(data)) - } - return nil, errors.New(errHead + dm.Message) - } - return resp.Body, nil -} - -func (cli *Client) ImageDiff(ctx context.Context, allLayers [][]string, repoTags [][]string) (*types.ImageDiffResponse, error) { - resp, err := cli.post(ctx, "/images/diff", nil, map[string]interface{}{ - "layers": allLayers, - "repoTags": repoTags, - }, nil) - if err != nil { - return nil, err - } - var diffRet types.ImageDiffResponse - err = json.NewDecoder(resp.body).Decode(&diffRet) - ensureReaderClosed(resp) - return &diffRet, nil -} - -func (cli *Client) ImageLoadLocal(ctx context.Context, quiet bool, size int64) (*types.HijackedResponse, error) { - query := url.Values{} - query.Add("file", "true") - query.Add("quiet", strconv.FormatBool(quiet)) - headers := http.Header{} - headers.Add("X-Hyper-Content-Length", strconv.FormatInt(size, 10)) - - resp, err := cli.postHijacked(ctx, "/images/load", query, nil, headers) - if err != nil { - return nil, err - } - - if resp.Resp != nil && resp.Resp.StatusCode != http.StatusSwitchingProtocols { - data, err := ioutil.ReadAll(resp.Resp.Body) - if err != nil { - return nil, err - } - return nil, errors.New("Error response from daemon: " + string(data)) - } - - return &resp, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_pull.go b/vendor/github.com/hyperhq/hyper-api/client/image_pull.go deleted file mode 100644 index 780c50472..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_pull.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "io" - "net/http" - "net/url" - - "context" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/reference" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusProxyAuthRequired { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_push.go b/vendor/github.com/hyperhq/hyper-api/client/image_push.go deleted file mode 100644 index c7332f8fc..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_push.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "errors" - "io" - "net/http" - "net/url" - - "context" - - distreference "github.com/docker/distribution/reference" - "github.com/hyperhq/hyper-api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return nil, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_remove.go b/vendor/github.com/hyperhq/hyper-api/client/image_remove.go deleted file mode 100644 index c59d2cf62..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -// ImageRemove removes an image from the docker host. 
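ImagePull, ImagePush, and ImageSearch in this client all share the same retry discipline: attempt the request once, and if the daemon answers with an auth-required status, call the caller-supplied PrivilegeFunc for fresh credentials and retry exactly once. A distilled sketch (names are mine; the trigger status varies per call, 407 for pull versus 401 for push and search):

package sketch

import "net/http"

// tryWithPrivilege captures the one-retry auth pattern used by the deleted
// image pull/push/search methods. attempt performs the request with the
// given auth header value and reports the HTTP status it saw.
func tryWithPrivilege(
	attempt func(auth string) (status int, err error),
	auth string,
	privilege func() (string, error), // e.g. options.PrivilegeFunc
) error {
	status, err := attempt(auth)
	if (status == http.StatusUnauthorized || status == http.StatusProxyAuthRequired) && privilege != nil {
		newAuth, perr := privilege()
		if perr != nil {
			return perr
		}
		_, err = attempt(newAuth)
	}
	return err
}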
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return nil, err - } - - var dels []types.ImageDelete - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_save.go b/vendor/github.com/hyperhq/hyper-api/client/image_save.go deleted file mode 100644 index 1d2bb5876..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_save.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "context" - "io" - "net/url" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_search.go b/vendor/github.com/hyperhq/hyper-api/client/image_search.go deleted file mode 100644 index 042c997b6..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_search.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/registry" -) - -// ImageSearch makes the docker host to search by a term in a remote registry. -// The list of results is not sorted in any fashion. 
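Note how ImageRemove above maps its options onto the wire: booleans become "1" flags, and PruneChildren is inverted into a noprune flag because pruning is the server-side default. A sketch of just the query construction:

package sketch

import "net/url"

func removeQuery(force, pruneChildren bool) url.Values {
	q := url.Values{}
	if force {
		q.Set("force", "1")
	}
	if !pruneChildren {
		// Pruning is the default; only the opt-out is sent.
		q.Set("noprune", "1")
	}
	return q
}

// removeQuery(true, false).Encode() == "force=1&noprune=1"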
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/image_tag.go b/vendor/github.com/hyperhq/hyper-api/client/image_tag.go deleted file mode 100644 index 423ba4aeb..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "net/url" - - distreference "github.com/docker/distribution/reference" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/reference" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string, options types.ImageTagOptions) error { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - tag := reference.GetTagFromNamedRef(distributionRef) - - query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/info.go b/vendor/github.com/hyperhq/hyper-api/client/info.go deleted file mode 100644 index a66c87a45..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -// Info returns information about the docker server. 
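ImageTag above leans on docker/distribution's reference parsing to split a repo from its tag and to refuse digest (Canonical) references. As a rough illustration of the repo/tag decomposition that ends up in the query string, a toy splitter; this ignores digests, which the real parser handles:

package sketch

import "strings"

// splitRepoTag is a simplified stand-in for the vendored reference parser:
// the last ':' starts the tag unless it sits inside a path segment (as in a
// registry host:port).
func splitRepoTag(ref string) (repo, tag string) {
	i := strings.LastIndex(ref, ":")
	if i >= 0 && !strings.Contains(ref[i+1:], "/") {
		return ref[:i], ref[i+1:]
	}
	return ref, ""
}

// splitRepoTag("busybox:1.30")           == ("busybox", "1.30")
// splitRepoTag("localhost:5000/busybox") == ("localhost:5000/busybox", "")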
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/interface.go b/vendor/github.com/hyperhq/hyper-api/client/interface.go deleted file mode 100644 index 7265b1b12..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/interface.go +++ /dev/null @@ -1,139 +0,0 @@ -package client - -import ( - "context" - "io" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/network" - "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/libcompose/config" -) - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - ClientVersion() string - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, checkpointID string) error - CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) ([]string, error) - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout int) error - ContainerStatPath(ctx context.Context, container, path string) 
(types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) - ContainerStart(ctx context.Context, container string, checkpointID string) error - ContainerStop(ctx context.Context, container string, timeout int) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig interface{}) error - ContainerWait(ctx context.Context, container string) (int, error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) - ImageLoad(ctx context.Context, input interface{}) (*types.ImageLoadResponse, error) - ImageSaveTarFromDaemon(ctx context.Context, imageIDs []string) (io.ReadCloser, error) - ImageDiff(ctx context.Context, allLayers [][]string, repoTags [][]string) (*types.ImageDiffResponse, error) - ImageLoadLocal(ctx context.Context, quiet bool, size int64) (*types.HijackedResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string, options types.ImageTagOptions) error - Info(ctx context.Context) (types.Info, error) - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error - RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) - ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) - VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) - VolumeInspect(ctx 
context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) - VolumeRemove(ctx context.Context, volumeID string) error - VolumeInitialize(ctx context.Context, options types.VolumesInitializeRequest) (types.VolumesInitializeResponse, error) - VolumeUploadFinish(ctx context.Context, session string) error - - SnapshotCreate(ctx context.Context, options types.SnapshotCreateRequest) (types.Snapshot, error) - SnapshotInspect(ctx context.Context, volumeID string) (types.Snapshot, error) - SnapshotList(ctx context.Context, filter filters.Args) (types.SnapshotsListResponse, error) - SnapshotRemove(ctx context.Context, id string) error - FipAllocate(ctx context.Context, count string) ([]string, error) - FipRelease(ctx context.Context, ip string) error - FipAttach(ctx context.Context, ip, container string) error - FipDetach(ctx context.Context, container string) (string, error) - FipList(ctx context.Context, opts types.NetworkListOptions) ([]map[string]string, error) - FipName(ctx context.Context, ip, name string) error - - SgCreate(ctx context.Context, name string, data io.Reader) error - SgRm(ctx context.Context, name string) error - SgUpdate(ctx context.Context, name string, data io.Reader) error - SgInspect(ctx context.Context, name string) (*types.SecurityGroup, error) - SgLs(ctx context.Context) ([]types.SecurityGroup, error) - - ComposeUp(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, au map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) - ComposeDown(p string, services []string, rmi string, vol, rmorphans bool) (io.ReadCloser, error) - ComposeCreate(project string, services []string, c *config.ServiceConfigs, vc map[string]*config.VolumeConfig, nc map[string]*config.NetworkConfig, au map[string]types.AuthConfig, forcerecreate, norecreate bool) (io.ReadCloser, error) - ComposeRm(p string, services []string, rmVol bool) (io.ReadCloser, error) - ComposeStart(p string, services []string) (io.ReadCloser, error) - ComposeStop(p string, services []string, timeout int) (io.ReadCloser, error) - ComposeKill(p string, services []string, signal string) (io.ReadCloser, error) - - ServiceCreate(ctx context.Context, sv types.Service) (types.Service, error) - ServiceUpdate(ctx context.Context, name string, sv types.ServiceUpdate) (types.Service, error) - ServiceDelete(ctx context.Context, id string, keep bool) error - ServiceList(ctx context.Context, opts types.ServiceListOptions) ([]types.Service, error) - ServiceInspect(ctx context.Context, serviceID string) (types.Service, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string) (types.Service, []byte, error) - - CronCreate(ctx context.Context, n string, j types.Cron) (types.Cron, error) - CronDelete(ctx context.Context, id string) error - CronHistory(ctx context.Context, id, since, tail string) ([]types.Event, error) - CronList(ctx context.Context, opts types.CronListOptions) ([]types.Cron, error) - CronInspect(ctx context.Context, id string) (types.Cron, error) - CronInspectWithRaw(ctx context.Context, serviceID string) (types.Cron, []byte, error) - - FuncCreate(ctx context.Context, opts types.Func) (types.Func, error) - FuncUpdate(ctx context.Context, name string, opts types.Func) (types.Func, error) - FuncDelete(ctx context.Context, name string) 
error - FuncList(ctx context.Context, opts types.FuncListOptions) ([]types.Func, error) - FuncInspect(ctx context.Context, name string) (types.Func, error) - FuncInspectWithRaw(ctx context.Context, name string) (types.Func, []byte, error) - FuncCall(ctx context.Context, region, name string, stdin io.Reader, sync bool) (io.ReadCloser, error) - FuncGet(ctx context.Context, region, callID string, wait bool) (io.ReadCloser, error) - FuncLogs(ctx context.Context, region, name, callID string, follow bool, tail string) (io.ReadCloser, error) - FuncStatus(ctx context.Context, region, name string) (*types.FuncStatusResponse, error) -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/hyperhq/hyper-api/client/login.go b/vendor/github.com/hyperhq/hyper-api/client/login.go deleted file mode 100644 index e83f4291a..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/login.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns UnauthorizerError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - - if resp != nil && resp.statusCode == http.StatusUnauthorized { - return types.AuthResponse{}, unauthorizedError{err} - } - if err != nil { - return types.AuthResponse{}, err - } - - var response types.AuthResponse - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_connect.go b/vendor/github.com/hyperhq/hyper-api/client/network_connect.go deleted file mode 100644 index df47137ab..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_connect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_create.go b/vendor/github.com/hyperhq/hyper-api/client/network_create.go deleted file mode 100644 index 7ea0f1da8..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "encoding/json" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// NetworkCreate creates a new network in the docker host. 
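One quirk worth noting in the NetworkCreate implementation that follows: the Decode error is discarded, so a malformed response surfaces as a zero-value NetworkCreateResponse with a nil error. Every sibling method in this client assigns the decode result; the conventional shape, using the deleted code's own identifiers, would be:

	var response types.NetworkCreateResponse
	serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
	if err != nil {
		return response, err
	}
	err = json.NewDecoder(serverResp.body).Decode(&response) // keep the decode error
	ensureReaderClosed(serverResp)
	return response, err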
-func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_disconnect.go b/vendor/github.com/hyperhq/hyper-api/client/network_disconnect.go deleted file mode 100644 index 1dd5410d7..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_disconnect.go +++ /dev/null @@ -1,14 +0,0 @@ -package client - -import ( - "context" - "github.com/hyperhq/hyper-api/types" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. -func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_inspect.go b/vendor/github.com/hyperhq/hyper-api/client/network_inspect.go deleted file mode 100644 index 1eedf678c..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "context" - "github.com/hyperhq/hyper-api/types" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and it's raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, nil, networkNotFoundError{networkID} - } - return networkResource, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_list.go b/vendor/github.com/hyperhq/hyper-api/client/network_list.go deleted file mode 100644 index ea03f134d..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "context" - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// NetworkList returns the list of networks configured in the docker host. 
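NetworkList below, like ImageList and VolumeList, folds structured filters into a single filters query parameter. The vendored filters package serializes a JSON document of field -> value -> bool; hand-rolled here for illustration (the authoritative wire format is whatever filters.ToParam emits, so treat this literal as an approximation):

package sketch

import (
	"encoding/json"
	"net/url"
)

func filtersQuery() (url.Values, error) {
	// Roughly the equivalent of: args := filters.NewArgs(); args.Add("dangling", "true")
	f := map[string]map[string]bool{
		"dangling": {"true": true},
	}
	buf, err := json.Marshal(f)
	if err != nil {
		return nil, err
	}
	q := url.Values{}
	q.Set("filters", string(buf))
	return q, nil
}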
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/network_remove.go b/vendor/github.com/hyperhq/hyper-api/client/network_remove.go deleted file mode 100644 index ab0666fdd..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/request.go b/vendor/github.com/hyperhq/hyper-api/client/request.go deleted file mode 100644 index d002c0e40..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/request.go +++ /dev/null @@ -1,195 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/hyperhq/hyper-api/client/transport/cancellable" - "github.com/hyperhq/hyper-api/signature" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int -} - -// head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) -} - -// getWithContext sends an http request to the docker API using the method GET with a specific go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "GET", path, query, nil, headers) -} - -// postWithContext sends an http request to the docker API using the method POST with a specific go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, obj, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "POST", path, query, body, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, obj, headers) -} - -// put sends an http request to the docker API using the method PUT. 
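putRaw, despite the recycled doc comment above it, differs from put in that it forwards a caller-provided io.Reader untouched, while the object-taking helpers funnel through sendRequest, which JSON-encodes the body and forces the Content-Type. That encode step, extracted as a sketch:

package sketch

import (
	"bytes"
	"encoding/json"
	"io"
)

// encodeJSONBody mirrors what the deleted sendRequest does with a non-nil
// obj: buffer the JSON encoding and tag the request application/json.
func encodeJSONBody(obj interface{}) (io.Reader, map[string][]string, error) {
	buf := bytes.NewBuffer(nil)
	if err := json.NewEncoder(buf).Encode(obj); err != nil {
		return nil, nil, err
	}
	headers := map[string][]string{"Content-Type": {"application/json"}}
	return buf, headers, nil
}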
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - return cli.sendClientRequest(ctx, "PUT", path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - var body io.Reader - - if obj != nil { - var err error - body, err = encodeData(obj) - if err != nil { - return nil, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - } - - return cli.sendClientRequest(ctx, method, path, query, body, headers) -} - -func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - serverResp := &serverResponse{ - body: nil, - statusCode: -1, - } - - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := cli.newRequest(method, path, query, body, headers) - if err != nil { - return serverResp, err - } - - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } - req.URL.Host = cli.addr - req.URL.Scheme = cli.transport.Scheme() - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - - req = signature.Sign4(cli.accessKey, cli.secretKey, req, cli.region) - resp, err := cancellable.Do(ctx, cli.transport, req) - - if err != nil { - if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrConnectionFailed - } - - if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.transport.Secure() && strings.Contains(err.Error(), "remote error: bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings: %v", err) - } - - return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - } - - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } - return serverResp, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(body)) - } - - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil -} - -func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest(method, apiPath, body) - if err != nil { - return nil, err - } - - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - return req, nil -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response *serverResponse) { - if response != nil && response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) - response.body.Close() - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/service.go b/vendor/github.com/hyperhq/hyper-api/client/service.go deleted file mode 100644 index bb48e6508..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/service.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// ServiceCreate creates a service in the Hyper.sh. -func (cli *Client) ServiceCreate(ctx context.Context, sv types.Service) (types.Service, error) { - var service types.Service - resp, err := cli.post(ctx, "/services/create", nil, sv, nil) - if err != nil { - return service, err - } - err = json.NewDecoder(resp.body).Decode(&service) - ensureReaderClosed(resp) - return service, err -} - -// ServiceUpdate updates a service in the Hyper.sh. -func (cli *Client) ServiceUpdate(ctx context.Context, name string, opts types.ServiceUpdate) (types.Service, error) { - var service types.Service - resp, err := cli.post(ctx, "/services/"+name+"/update", nil, opts, nil) - if err != nil { - return service, err - } - err = json.NewDecoder(resp.body).Decode(&service) - ensureReaderClosed(resp) - return service, err -} - -// ServiceDelete removes a service from the Hyper.sh. 
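A caveat on the ServiceDelete implementation that follows: it ignores its keep parameter and unconditionally sets a misspelled "keey" query key. What was presumably intended is sketched below; the real parameter name on the Hyper.sh side is a guess:

	v := url.Values{}
	if keep {
		v.Set("keep", "yes") // assumed key; the shipped code always sent "keey=yes"
	}
	resp, err := cli.delete(ctx, "/services/"+id, v, nil)
	ensureReaderClosed(resp)
	return err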
-func (cli *Client) ServiceDelete(ctx context.Context, id string, keep bool) error { - v := url.Values{} - v.Set("keey", "yes") - resp, err := cli.delete(ctx, "/services/"+id, v, nil) - ensureReaderClosed(resp) - return err -} - -// ServiceList returns the services configured in the docker host. -func (cli *Client) ServiceList(ctx context.Context, opts types.ServiceListOptions) ([]types.Service, error) { - var services = []types.Service{} - query := url.Values{} - - if opts.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, opts.Filters) - if err != nil { - return services, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/services", query, nil) - if err != nil { - return services, err - } - - err = json.NewDecoder(resp.body).Decode(&services) - ensureReaderClosed(resp) - return services, err -} - -// ServiceInspect returns the information about a specific service in the docker host. -func (cli *Client) ServiceInspect(ctx context.Context, serviceID string) (types.Service, error) { - service, _, err := cli.ServiceInspectWithRaw(ctx, serviceID) - return service, err -} - -// ServiceInspectWithRaw returns the information about a specific service in the docker host and it's raw representation -func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (types.Service, []byte, error) { - var service types.Service - resp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return service, nil, serviceNotFoundError{serviceID} - } - return service, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return service, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&service) - return service, body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/sg.go b/vendor/github.com/hyperhq/hyper-api/client/sg.go deleted file mode 100644 index dae3b1417..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/sg.go +++ /dev/null @@ -1,69 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "io" - "net/url" - - "github.com/hyperhq/hyper-api/types" -) - -func (cli *Client) SgCreate(ctx context.Context, name string, data io.Reader) error { - var v = url.Values{} - serverResp, err := cli.postRaw(ctx, "/sg/"+name, v, data, nil) - if err != nil { - return err - } - - ensureReaderClosed(serverResp) - return nil -} - -func (cli *Client) SgRm(ctx context.Context, name string) error { - var v = url.Values{} - serverResp, err := cli.delete(ctx, "/sg/"+name, v, nil) - if err != nil { - return err - } - - ensureReaderClosed(serverResp) - return nil -} - -func (cli *Client) SgUpdate(ctx context.Context, name string, data io.Reader) error { - var v = url.Values{} - serverResp, err := cli.putRaw(ctx, "/sg/"+name, v, data, nil) - if err != nil { - return err - } - - ensureReaderClosed(serverResp) - return nil -} - -func (cli *Client) SgInspect(ctx context.Context, name string) (*types.SecurityGroup, error) { - var v = url.Values{} - serverResp, err := cli.get(ctx, "/sg/"+name, v, nil) - if err != nil { - return nil, err - } - - var sg types.SecurityGroup - err = json.NewDecoder(serverResp.body).Decode(&sg) - ensureReaderClosed(serverResp) - return &sg, nil -} - -func (cli *Client) SgLs(ctx context.Context) ([]types.SecurityGroup, error) { - var v = url.Values{} - serverResp, err := cli.get(ctx, "/sg", v, nil) - if err != nil { - return nil, err - } - - var sgs 
[]types.SecurityGroup - err = json.NewDecoder(serverResp.body).Decode(&sgs) - ensureReaderClosed(serverResp) - return sgs, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/snapshot.go b/vendor/github.com/hyperhq/hyper-api/client/snapshot.go deleted file mode 100644 index 8da510abd..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/snapshot.go +++ /dev/null @@ -1,73 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// SnapshotList returns the snapshots configured in the docker host. -func (cli *Client) SnapshotList(ctx context.Context, filter filters.Args) (types.SnapshotsListResponse, error) { - var snapshots types.SnapshotsListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParam(filter) - if err != nil { - return snapshots, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/snapshots", query, nil) - if err != nil { - return snapshots, err - } - - err = json.NewDecoder(resp.body).Decode(&snapshots) - ensureReaderClosed(resp) - return snapshots, err -} - -// SnapshotInspect returns the information about a specific snapshot in the docker host. -func (cli *Client) SnapshotInspect(ctx context.Context, snapshotID string) (types.Snapshot, error) { - var snapshot types.Snapshot - resp, err := cli.get(ctx, "/snapshots/"+snapshotID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return snapshot, snapshotNotFoundError{snapshotID} - } - return snapshot, err - } - err = json.NewDecoder(resp.body).Decode(&snapshot) - ensureReaderClosed(resp) - return snapshot, err -} - -// SnapshotCreate creates a snapshot in the docker host. -func (cli *Client) SnapshotCreate(ctx context.Context, options types.SnapshotCreateRequest) (types.Snapshot, error) { - var snapshot types.Snapshot - v := url.Values{} - v.Set("volume", options.Volume) - v.Set("name", options.Name) - if options.Force { - v.Set("force", "true") - } - resp, err := cli.post(ctx, "/snapshots/create", v, options, nil) - if err != nil { - return snapshot, err - } - err = json.NewDecoder(resp.body).Decode(&snapshot) - ensureReaderClosed(resp) - return snapshot, err -} - -// SnapshotRemove removes a snapshot from the docker host. -func (cli *Client) SnapshotRemove(ctx context.Context, snapshotID string) error { - resp, err := cli.delete(ctx, "/snapshots/"+snapshotID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/LICENSE b/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/LICENSE deleted file mode 100644 index 6a66aea5e..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler.go b/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler.go deleted file mode 100644 index 653995e3d..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/hyperhq/hyper-api/client/transport" -) - -func canceler(client transport.Sender, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler_go14.go b/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler_go14.go deleted file mode 100644 index 595024256..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/canceler_go14.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/hyperhq/hyper-api/client/transport" -) - -type requestCanceler interface { - CancelRequest(*http.Request) -} - -func canceler(client transport.Sender, req *http.Request) func() { - rc, ok := client.(requestCanceler) - if !ok { - return func() {} - } - return func() { - rc.CancelRequest(req) - } -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/cancellable.go b/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/cancellable.go deleted file mode 100644 index ba3190a42..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/cancellable/cancellable.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cancellable provides helper function to cancel http requests. 
-package cancellable - -import ( - "io" - "net/http" - - "github.com/hyperhq/hyper-api/client/transport" - - "golang.org/x/net/context" -) - -func nop() {} - -var ( - testHookContextDoneBeforeHeaders = nop - testHookDoReturned = nop - testHookDidBodyClose = nop -) - -// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response. -// If the client is nil, http.DefaultClient is used. -// If the context is canceled or times out, ctx.Err() will be returned. -// -// FORK INFORMATION: -// -// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by -// taking a Sender interface rather than a *http.Client directly. That allow us to use -// this funcion with mocked clients and hijacked connections. -func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - - // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go. - cancel := canceler(client, req) - - type responseAndError struct { - resp *http.Response - err error - } - result := make(chan responseAndError, 1) - - go func() { - resp, err := client.Do(req) - testHookDoReturned() - result <- responseAndError{resp, err} - }() - - var resp *http.Response - - select { - case <-ctx.Done(): - testHookContextDoneBeforeHeaders() - cancel() - // Clean up after the goroutine calling client.Do: - go func() { - if r := <-result; r.resp != nil && r.resp.Body != nil { - testHookDidBodyClose() - r.resp.Body.Close() - } - }() - return nil, ctx.Err() - case r := <-result: - var err error - resp, err = r.resp, r.err - if err != nil { - return resp, err - } - } - - c := make(chan struct{}) - go func() { - select { - case <-ctx.Done(): - cancel() - case <-c: - // The response's Body is closed. - } - }() - resp.Body = ¬ifyingReader{resp.Body, c} - - return resp, nil -} - -// notifyingReader is an io.ReadCloser that closes the notify channel after -// Close is called or a Read fails on the underlying ReadCloser. -type notifyingReader struct { - io.ReadCloser - notify chan<- struct{} -} - -func (r *notifyingReader) Read(p []byte) (int, error) { - n, err := r.ReadCloser.Read(p) - if err != nil && r.notify != nil { - close(r.notify) - r.notify = nil - } - return n, err -} - -func (r *notifyingReader) Close() error { - err := r.ReadCloser.Close() - if r.notify != nil { - close(r.notify) - r.notify = nil - } - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/client.go b/vendor/github.com/hyperhq/hyper-api/client/transport/client.go deleted file mode 100644 index 13d4b3ab3..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/client.go +++ /dev/null @@ -1,47 +0,0 @@ -package transport - -import ( - "crypto/tls" - "net/http" -) - -// Sender is an interface that clients must implement -// to be able to send requests to a remote connection. -type Sender interface { - // Do sends request to a remote endpoint. - Do(*http.Request) (*http.Response, error) -} - -// Client is an interface that abstracts all remote connections. -type Client interface { - Sender - // Secure tells whether the connection is secure or not. - Secure() bool - // Scheme returns the connection protocol the client uses. - Scheme() string - // TLSConfig returns any TLS configuration the client uses. - TLSConfig() *tls.Config -} - -// tlsInfo returns information about the TLS configuration. -type tlsInfo struct { - tlsConfig *tls.Config -} - -// TLSConfig returns the TLS configuration. 
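The cancellable package above is a fork of golang.org/x/net/context/ctxhttp from before net/http understood contexts. On modern Go (1.13+ for http.NewRequestWithContext) the whole dance, cancel channel, result goroutine, and notifyingReader, collapses into the standard library; a sketch of the equivalent call:

package sketch

import (
	"context"
	"net/http"
)

func do(ctx context.Context, client *http.Client, method, url string) (*http.Response, error) {
	req, err := http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return nil, err
	}
	// client.Do honors the request context: cancellation closes the
	// connection and surfaces an error wrapping ctx.Err().
	return client.Do(req)
}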
-func (t *tlsInfo) TLSConfig() *tls.Config { - return t.tlsConfig -} - -// Scheme returns protocol scheme to use. -func (t *tlsInfo) Scheme() string { - if t.tlsConfig != nil { - return "https" - } - return "http" -} - -// Secure returns true if there is a TLS configuration. -func (t *tlsInfo) Secure() bool { - return t.tlsConfig != nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone.go b/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone.go deleted file mode 100644 index 31be0ce51..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go1.7 - -package transport - -import "crypto/tls" - -// TLSConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func TLSConfigClone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone_go17.go b/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone_go17.go deleted file mode 100644 index a28c9141b..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7 - -package transport - -import "crypto/tls" - -// TLSConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func TLSConfigClone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/transport/transport.go b/vendor/github.com/hyperhq/hyper-api/client/transport/transport.go deleted file mode 100644 index ff28af185..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/transport/transport.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package transport provides function to send request to remote endpoints. -package transport - -import ( - "fmt" - "net/http" - - "github.com/docker/go-connections/sockets" -) - -// apiTransport holds information about the http transport to connect with the API. -type apiTransport struct { - *http.Client - *tlsInfo - transport *http.Transport -} - -// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client. -// It uses Docker's default http transport configuration if the client is nil. -// It does not modify the client's transport if it's not nil. 
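The pair of build-tagged TLSConfigClone files above exists because older toolchains lacked an exported Clone on tls.Config (it landed in the standard library in Go 1.8). On any modern Go, both variants reduce to the standard method:

package sketch

import "crypto/tls"

func clone(c *tls.Config) *tls.Config {
	if c == nil {
		return nil
	}
	return c.Clone() // exported in the standard library since Go 1.8
}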
-func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { - var transport *http.Transport - - if client != nil { - tr, ok := client.Transport.(*http.Transport) - if !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - transport = tr - } else { - transport = defaultTransport(proto, addr) - client = &http.Client{ - Transport: transport, - } - } - - return &apiTransport{ - Client: client, - tlsInfo: &tlsInfo{transport.TLSClientConfig}, - transport: transport, - }, nil -} - -// CancelRequest stops a request execution. -func (a *apiTransport) CancelRequest(req *http.Request) { - a.transport.CancelRequest(req) -} - -// defaultTransport creates a new http.Transport with Docker's -// default transport configuration. -func defaultTransport(proto, addr string) *http.Transport { - tr := new(http.Transport) - sockets.ConfigureTransport(tr, proto, addr) - return tr -} - -var _ Client = &apiTransport{} diff --git a/vendor/github.com/hyperhq/hyper-api/client/version.go b/vendor/github.com/hyperhq/hyper-api/client/version.go deleted file mode 100644 index 47de21b8d..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - - "github.com/hyperhq/hyper-api/types" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) - return server, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/volume_create.go b/vendor/github.com/hyperhq/hyper-api/client/volume_create.go deleted file mode 100644 index 994c83faa..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - - "github.com/hyperhq/hyper-api/types" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/volume_init.go b/vendor/github.com/hyperhq/hyper-api/client/volume_init.go deleted file mode 100644 index f90fe1c87..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/volume_init.go +++ /dev/null @@ -1,39 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - - "context" - - "github.com/hyperhq/hyper-api/types" -) - -// VolumeInitialize initializes a volume in the docker host. 
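defaultTransport above delegates the proto/addr plumbing to docker/go-connections, which knows how to dial unix sockets, npipes, and TCP. A sketch of standing up an http.Client the same way, with the error from ConfigureTransport handled rather than dropped:

package sketch

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func newHTTPClient(proto, addr string) (*http.Client, error) {
	tr := new(http.Transport)
	if err := sockets.ConfigureTransport(tr, proto, addr); err != nil {
		return nil, err
	}
	return &http.Client{Transport: tr}, nil
}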
-func (cli *Client) VolumeInitialize(ctx context.Context, options types.VolumesInitializeRequest) (types.VolumesInitializeResponse, error) { - var volResp types.VolumesInitializeResponse - resp, err := cli.post(ctx, "/volumes/initialize", nil, options, nil) - if err != nil { - return types.VolumesInitializeResponse{}, err - } - err = json.NewDecoder(resp.body).Decode(&volResp) - ensureReaderClosed(resp) - return volResp, err -} - -// VolumeUploadFinish notifies the docker host of the termination of a volume upload session -func (cli *Client) VolumeUploadFinish(ctx context.Context, session string) error { - v := url.Values{} - v.Set("session", session) - resp, err := cli.put(ctx, "/volumes/uploadfinish", v, nil, nil) - if err != nil { - return err - } - ensureReaderClosed(resp) - if resp.statusCode != http.StatusOK && resp.statusCode != http.StatusNoContent { - return fmt.Errorf("Volume upload finish failed with %d", resp.statusCode) - } - return nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/volume_inspect.go b/vendor/github.com/hyperhq/hyper-api/client/volume_inspect.go deleted file mode 100644 index afab639b8..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/hyperhq/hyper-api/types" -) - -// VolumeInspect returns the information about a specific volume in the docker host. -func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { - var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return volume, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/volume_list.go b/vendor/github.com/hyperhq/hyper-api/client/volume_list.go deleted file mode 100644 index b3f98bce9..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hyper-api/types/filters" -) - -// VolumeList returns the volumes configured in the docker host.
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { - var volumes types.VolumesListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) - return volumes, err -} diff --git a/vendor/github.com/hyperhq/hyper-api/client/volume_remove.go b/vendor/github.com/hyperhq/hyper-api/client/volume_remove.go deleted file mode 100644 index 6a7b4d81b..000000000 --- a/vendor/github.com/hyperhq/hyper-api/client/volume_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "context" - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { - resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/hyperhq/hyper-api/signature/sign.go b/vendor/github.com/hyperhq/hyper-api/signature/sign.go deleted file mode 100644 index db6464cc3..000000000 --- a/vendor/github.com/hyperhq/hyper-api/signature/sign.go +++ /dev/null @@ -1,107 +0,0 @@ -package signature - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "net/http" - "sort" - "strings" -) - -var b64 = base64.StdEncoding - -// ---------------------------------------------------------------------------- -// S3 signing (http://goo.gl/G1LrK) - -var s3ParamsToSign = map[string]bool{ - "acl": true, - "location": true, - "logging": true, - "notification": true, - "partNumber": true, - "policy": true, - "requestPayment": true, - "torrent": true, - "uploadId": true, - "uploads": true, - "versionId": true, - "versioning": true, - "versions": true, - "response-content-type": true, - "response-content-language": true, - "response-expires": true, - "response-cache-control": true, - "response-content-disposition": true, - "response-content-encoding": true, -} - -func makeSign(accessKey, secretKey string, req *http.Request) (string, error) { - var md5, ctype, date, xamz string - var xamzDate bool - var sarray []string - headers := req.Header - req.ParseForm() - params := req.Form - method := req.Method - canonicalPath := req.URL.EscapedPath() - for k, v := range headers { - k = strings.ToLower(k) - switch k { - case "content-md5": - md5 = v[0] - case "content-type": - ctype = v[0] - case "date": - if !xamzDate { - date = v[0] - } - default: - if strings.HasPrefix(k, "x-amz-") { - vall := strings.Join(v, ",") - sarray = append(sarray, k+":"+vall) - if k == "x-amz-date" { - xamzDate = true - date = "" - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - xamz = strings.Join(sarray, "\n") + "\n" - } - - // expires := false - if v, ok := params["Expires"]; ok { - // Query string request authentication alternative. - // expires = true - date = v[0] - params["HSCAccessKeyId"] = []string{accessKey} - } - - sarray = sarray[0:0] - for k, v := range params { - if s3ParamsToSign[k] { - for _, vi := range v { - if vi == "" { - sarray = append(sarray, k) - } else { - // "When signing you do not encode these values." - sarray = append(sarray, k+"="+vi) - } - } - } - } - if len(sarray) > 0 { - sort.StringSlice(sarray).Sort() - canonicalPath = canonicalPath + "?" 
+ strings.Join(sarray, "&") - } - - payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath - hash := hmac.New(sha1.New, []byte(secretKey)) - hash.Write([]byte(payload)) - signature := make([]byte, b64.EncodedLen(hash.Size())) - b64.Encode(signature, hash.Sum(nil)) - return string(signature), nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/signature/sign4.go b/vendor/github.com/hyperhq/hyper-api/signature/sign4.go deleted file mode 100644 index 95acbbbfd..000000000 --- a/vendor/github.com/hyperhq/hyper-api/signature/sign4.go +++ /dev/null @@ -1,365 +0,0 @@ -/* - Based on the AWS Signature Algorithm Sign4 http://docs.aws.amazon.com/general/latest/gr/sigv4_signing.html - Based on the Implementation of https://github.com/smartystreets/go-aws-auth - - Both Sign and Check - - hostname of Hyper - - change header X-AMZ- to X-Hyper- - - changed normuri, treat // as / -*/ -package signature - -import ( - "bytes" - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "sort" - "strings" - "time" -) - -const ( - headerPrefix = "X-Hyper-" - headerDate = "X-Hyper-Date" - headerContentHash = "X-Hyper-Content-Sha256" - headerAuthz = "Authorization" - - metaAlgorithm = "HYPER-HMAC-SHA256" - - keyPartsPrefix = "HYPER" - keyPartsRequest = "hyper_request" - - timeFormatV4 = "20060102T150405Z" - - reqExpiration = 5 * time.Minute -) - -type AuthnHeader struct { - Algorithm string - AccessKey string - Scope string - SignedHeader string - Signature string - Date string -} - -func Signiture4(secretKey string, req *http.Request, header *AuthnHeader, region string) (bool, error) { - meta := &metadata{ - algorithm: header.Algorithm, - credentialScope: header.Scope, - signedHeaders: header.SignedHeader, - date: header.Date, - region: region, - service: "hyper", - } - - hashedCanonReq, ok := canonicalRequestV4FromMeta(req, meta) - if !ok { - return false, errors.New("payload check error") - } - - stringToSign := metaToSignV4(req, hashedCanonReq, meta) - - signingKey := signingKeyV4(secretKey, meta.date, meta.region, meta.service) - signature := signatureV4(signingKey, stringToSign) - return signature == header.Signature, nil -} - -func Sign4(accessKey, secretKey string, req *http.Request, region string) *http.Request { - - prepareRequestV4(req) - meta := &metadata{} - - // Task 1 - hashedCanonReq := hashedCanonicalRequestV4(req, meta) - - // Task 2 - stringToSign := stringToSignV4(req, hashedCanonReq, meta, region) - - // Task 3 - signingKey := signingKeyV4(secretKey, meta.date, meta.region, meta.service) - signature := signatureV4(signingKey, stringToSign) - - req.Header.Set(headerAuthz, buildAuthHeaderV4(accessKey, signature, meta)) - - return req -} - -// Build Request Steps -func prepareRequestV4(request *http.Request) *http.Request { - necessaryDefaults := map[string]string{ - "Content-Type": "application/json", - headerDate: timestampV4(), - } - - for header, value := range necessaryDefaults { - if request.Header.Get(header) == "" { - request.Header.Set(header, value) - } - } - - if request.URL.Path == "" { - request.URL.Path += "/" - } - - return request -} - -func hashedCanonicalRequestV4(request *http.Request, meta *metadata) string { - // TASK 1. 
http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html - - payload := readAndReplaceBody(request) - payloadHash := hashSHA256(payload) - request.Header.Set(headerContentHash, payloadHash) - - // Set this in header values to make it appear in the range of headers to sign - request.Header.Set("Host", request.URL.Host) - - var sortedHeaderKeys []string - for key := range request.Header { - switch key { - case "Content-Type", "Content-Md5", "Host": - default: - if !strings.HasPrefix(key, headerPrefix) { - continue - } - } - sortedHeaderKeys = append(sortedHeaderKeys, strings.ToLower(key)) - } - sort.Strings(sortedHeaderKeys) - - var headersToSign string - for _, key := range sortedHeaderKeys { - value := strings.TrimSpace(request.Header.Get(key)) - if key == "host" { - //Hyper(AWS) does not include port in signing request. - if strings.Contains(value, ":") { - split := strings.Split(value, ":") - port := split[1] - if port == "80" || port == "443" { - value = split[0] - } - } - } - headersToSign += key + ":" + value + "\n" - } - meta.signedHeaders = concat(";", sortedHeaderKeys...) - canonicalRequest := concat("\n", request.Method, normuri(request.URL.Path), normquery(request.URL.Query()), headersToSign, meta.signedHeaders, payloadHash) - - return hashSHA256([]byte(canonicalRequest)) -} - -func canonicalRequestV4FromMeta(request *http.Request, meta *metadata) (string, bool) { - payload := readPayload(request) - payloadHash := hashSHA256(payload) - if request.Header.Get(headerContentHash) != payloadHash { - return "", false - } - var headersToSign string - for _, hdr := range strings.Split(meta.signedHeaders, ";") { - value := strings.TrimSpace(request.Header.Get(hdr)) - if hdr == "host" { - //Hyper(AWS) does not include port in signing request. - if strings.Contains(value, ":") { - split := strings.Split(value, ":") - port := split[1] - if port == "80" || port == "443" { - value = split[0] - } - } - } - headersToSign += hdr + ":" + value + "\n" - } - canonicalRequest := concat("\n", request.Method, normuri(request.URL.Path), normquery(request.URL.Query()), headersToSign, meta.signedHeaders, payloadHash) - return canonicalRequest, true -} - -func stringToSignV4(request *http.Request, hashedCanonReq string, meta *metadata, region string) string { - // TASK 2. http://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html - - requestTs := request.Header.Get(headerDate) - - meta.algorithm = metaAlgorithm - meta.service, meta.region = serviceAndRegion(request.Host, region) - meta.date = tsDateV4(requestTs) - meta.credentialScope = concat("/", meta.date, meta.region, meta.service, keyPartsRequest) - - return concat("\n", meta.algorithm, requestTs, meta.credentialScope, hashedCanonReq) -} - -func metaToSignV4(request *http.Request, hashedCanonReq string, meta *metadata) string { - return concat("\n", meta.algorithm, request.Header.Get(headerDate), meta.credentialScope, hashedCanonReq) -} - -func signingKeyV4(secretKey, date, region, service string) []byte { - kDate := hmacSHA256([]byte(keyPartsPrefix+secretKey), date) - kRegion := hmacSHA256(kDate, region) - kService := hmacSHA256(kRegion, service) - kSigning := hmacSHA256(kService, keyPartsRequest) - return kSigning -} - -func signatureV4(signingKey []byte, stringToSign string) string { - // TASK 3. 
http://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html - - return hex.EncodeToString(hmacSHA256(signingKey, stringToSign)) -} - -func buildAuthHeaderV4(accessKey, signature string, meta *metadata) string { - credential := accessKey + "/" + meta.credentialScope - - return meta.algorithm + - " Credential=" + credential + - ", SignedHeaders=" + meta.signedHeaders + - ", Signature=" + signature -} - -// Check Request Steps -func validateExpire(req *http.Request) bool { - dh := req.Header.Get(headerDate) - if dh == "" { - return false - } - date, err := time.ParseInLocation(timeFormatV4, dh, time.UTC) - if err != nil { - return false - } - if date.Add(reqExpiration).Before(time.Now().UTC()) { - return false - } - return true -} - -// Details -type metadata struct { - algorithm string - credentialScope string - signedHeaders string - date string - region string - service string -} - -func timestampV4() string { - return time.Now().UTC().Format(timeFormatV4) -} - -func readAndReplaceBody(request *http.Request) []byte { - if request.Body == nil { - return []byte{} - } - payload, _ := ioutil.ReadAll(request.Body) - request.Body = ioutil.NopCloser(bytes.NewReader(payload)) - return payload -} - -func readPayload(req *http.Request) []byte { - if req.Body == nil { - return []byte{} - } - payload, _ := ioutil.ReadAll(req.Body) - return payload -} - -func hmacSHA256(key []byte, content string) []byte { - mac := hmac.New(sha256.New, key) - mac.Write([]byte(content)) - return mac.Sum(nil) -} - -func hashSHA256(content []byte) string { - h := sha256.New() - h.Write(content) - return fmt.Sprintf("%x", h.Sum(nil)) -} - -func concat(delim string, str ...string) string { - return strings.Join(str, delim) -} - -func normuri(uri string) string { - parts := []string{} - for _, s := range strings.Split(uri, "/") { - if s == "" { - //bypass empty path segments - continue - } - parts = append(parts, encodePathFrag(s)) - } - return strings.Join(parts, "/") -} - -func encodePathFrag(s string) string { - hexCount := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c) { - hexCount++ - } - } - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c) { - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - } else { - t[j] = c - j++ - } - } - return string(t) -} - -func shouldEscape(c byte) bool { - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { - return false - } - if '0' <= c && c <= '9' { - return false - } - if c == '-' || c == '_' || c == '.' || c == '~' { - return false - } - return true -} - -func normquery(v url.Values) string { - queryString := v.Encode() - - // Go encodes a space as '+' but Amazon requires '%20'. Luckily any '+' in the - // original query string has been percent escaped so all '+' chars that are left - // were originally spaces. - - return strings.Replace(queryString, "+", "%20", -1) -} - -// serviceAndRegion parses a hostname to determine the service and region it refers to.
-func serviceAndRegion(host, r string) (service string, region string) { - // These are the defaults if the hostname doesn't suggest something else - region = r - service = "hyper" - - // region.hyper.sh - if strings.HasSuffix(host, ".hyper.sh") { - parts := strings.SplitN(host, ".", 2) - if parts[1] == "hyper.sh" { - region = parts[0] - } - } - // no more service yet - - return -} - -func tsDateV4(timestamp string) string { - return timestamp[:8] -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/auth.go b/vendor/github.com/hyperhq/hyper-api/types/auth.go deleted file mode 100644 index 056af6b84..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. - IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/blkiodev/blkio.go b/vendor/github.com/hyperhq/hyper-api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/client.go b/vendor/github.com/hyperhq/hyper-api/types/client.go deleted file mode 100644 index 83d9409d8..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/client.go +++ /dev/null @@ -1,249 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - "net/http" - - "github.com/docker/go-units" - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/filters" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - Exit bool -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int -} - -// ContainerListOptions holds parameters to list containers with. 
-type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader - Resp *http.Response -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface implemented by structs -// that can close input streams to prevent writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process.
-type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImageDiffResponse returns information to the client about image diff. -type ImageDiffResponse struct { - // Body must be closed to avoid a resource leak - ExistLayers []string `json:"existLayers"` -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args -} - -// ImageTagOptions holds parameters to tag an image -type ImageTagOptions struct { - Force bool -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height int - Width int -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/configs.go b/vendor/github.com/hyperhq/hyper-api/types/configs.go deleted file mode 100644 index c95cb777a..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/configs.go +++ /dev/null @@ -1,53 +0,0 @@ -package types - -import ( - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. 
-type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Cmd []string // Execution commands and args -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/container/config.go b/vendor/github.com/hyperhq/hyper-api/types/container/config.go deleted file mode 100644 index 9d3675ccd..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/container/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package container - -import ( - "github.com/docker/go-connections/nat" - "github.com/hyperhq/hyper-api/types/strslice" -) - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the first attached client disconnects. - Env []string // List of environment variables to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in which the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile - Labels map[string]string // List of labels set on this container - StopSignal string `json:",omitempty"` // Signal to stop a container -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/container/host_config.go b/vendor/github.com/hyperhq/hyper-api/types/container/host_config.go deleted file mode 100644 index 4b648f3d7..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/container/host_config.go +++ /dev/null @@ -1,319 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" - "github.com/hyperhq/hyper-api/types/blkiodev" - "github.com/hyperhq/hyper-api/types/strslice" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container.
The supported -// values are platform specific. -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container's cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace.
-func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart if it exits with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless the user has put it into a stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy values to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs.
other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig is the non-portable Config structure of a container. -// Here, "non-portable" means "dependent on the host we are running on". -// Portable information *should* appear in Config.
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other containers - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS servers to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed ports for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only mode - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation Isolation // Isolation technology of the container (eg default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/container/hostconfig.go b/vendor/github.com/hyperhq/hyper-api/types/container/hostconfig.go deleted file mode 100644 index 5c7e1985e..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/container/hostconfig.go +++ /dev/null @@ -1,79 +0,0 @@ -package container - -import "strings" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer returns the id of the container whose network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates whether the network is user-created -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -// UserDefined returns the name of the user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/cron.go b/vendor/github.com/hyperhq/hyper-api/types/cron.go deleted file mode 100644 index b74a46644..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/cron.go +++ /dev/null @@ -1,67 +0,0 @@ -package types - -import ( - "time" - - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/network" -) - -type Cron struct { - // Job name. Must be unique, acts as the id. - Name string `json:"Name"` - - // Cron expression for the job. When to run the job. - Schedule string `json:"Schedule"` - - // AccessKey - AccessKey string `json:"AccessKey"` - - // SecretKey - SecretKey string `json:"SecretKey"` - - ContainerName string `json:"ContainerName"` - Config *container.Config `json:"Config"` - HostConfig *container.HostConfig `json:"HostConfig"` - NetConfig *network.NetworkingConfig `json:"NetConfig"` - - // Owner of the job. - Owner string `json:"Owner"` - - // Owner email of the job. - OwnerEmail string `json:"OwnerEmail"` - - // MailPolicy - MailPolicy string `json:"MailPolicy"` - - // Number of successful executions of this job. - SuccessCount int `json:"SuccessCount"` - - // Number of errors running this job. - ErrorCount int `json:"ErrorCount"` - - // Last time this job executed. - LastRun time.Time `json:"LastRun"` - - Created time.Time `json:"Created"` - - // Is this job disabled? - Disabled bool `json:"Disabled"` - - // Tags of the target servers to run this job against.
- Tags map[string]string `json:"Tags"` -} - -type CronListOptions struct { - Filters filters.Args -} - -type Event struct { - StartedAt int64 `json:"StartedAt"` - FinishedAt int64 `json:"FinishedAt"` - Status string `json:"Status"` - Job string `json:"Job"` - Container string `json:"Container"` - Message string `json:"Message"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/errors.go b/vendor/github.com/hyperhq/hyper-api/types/errors.go deleted file mode 100644 index 649ab9513..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// ErrorResponse is the response body of API errors. -type ErrorResponse struct { - Message string `json:"message"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/filters/parse.go b/vendor/github.com/hyperhq/hyper-api/types/filters/parse.go deleted file mode 100644 index 36d3105c9..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/filters/parse.go +++ /dev/null @@ -1,307 +0,0 @@ -// Package filters provides helper functions to parse and handle command line -// filters, used for example in the docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/hyperhq/hyper-api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version).
-func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associated with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field match the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if sources == nil || len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field match the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true.
-func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. -func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(filters.fields[field]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. 
-func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/func.go b/vendor/github.com/hyperhq/hyper-api/types/func.go deleted file mode 100644 index 7dfca91bc..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/func.go +++ /dev/null @@ -1,84 +0,0 @@ -package types - -import ( - "time" - - "github.com/docker/go-connections/nat" - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/network" - "github.com/hyperhq/hyper-api/types/strslice" -) - -type FuncConfig struct { - Tty bool `json:"Tty,omitempty"` - Env *[]string `json:"Env,omitempty"` - ExposedPorts map[nat.Port]struct{} `json:"ExposedPorts,omitempty"` - Cmd strslice.StrSlice `json:"Cmd,omitempty"` - Image string `json:"Image,omitempty"` - Entrypoint strslice.StrSlice `json:"Entrypoint,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - Labels map[string]string `json:"Labels,omitempty"` - StopSignal string `json:"StopSignal,omitempty"` -} - -type FuncHostConfig struct { - VolumesFrom []string `json:"VolumesFrom,omitempty"` - PortBindings nat.PortMap `json:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty"` - NetworkMode container.NetworkMode `json:"NetworkMode,omitempty"` -} - -type Func struct { - // Func name, required, unique, immutable, max length: 255, format: [a-z0-9]([-a-z0-9]*[a-z0-9])? 
- Name string `json:"Name"` - - // Container size, optional, default: s4 - ContainerSize string `json:"ContainerSize,omitempty"` - - // The maximum execution duration of a function call - Timeout int `json:"Timeout,omitempty"` - - // The UUID of func - UUID string `json:"UUID,omitempty"` - - // The created time - Created time.Time `json:"Created,omitempty"` - - // Whether the UUID should be regenerated - Refresh bool `json:"Refresh,omitempty"` - - // The container config - Config FuncConfig `json:"Config,omitempty"` - - HostConfig FuncHostConfig `json:"HostConfig,omitempty"` - - NetworkingConfig network.NetworkingConfig `json:"NetworkingConfig,omitempty"` -} - -type FuncListOptions struct { - Filters filters.Args -} - -type FuncCallResponse struct { - CallId string `json:"CallId"` -} - -type FuncLogsResponse struct { - Time time.Time `json:"Time"` - Event string `json:"Event"` - CallId string `json:"CallId"` - ShortStdin string `json:"ShortStdin"` - ShortStdout string `json:"ShortStdout"` - ShortStderr string `json:"ShortStderr"` - Message string `json:"Message"` -} - -type FuncStatusResponse struct { - Total int `json:"Total"` - Pending int `json:"Pending"` - Running int `json:"Running"` - Finished int `json:"Finished"` - Failed int `json:"Failed"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/network/network.go b/vendor/github.com/hyperhq/hyper-api/types/network/network.go deleted file mode 100644 index bce60f5ee..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/network/network.go +++ /dev/null @@ -1,52 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/plugin.go b/vendor/github.com/hyperhq/hyper-api/types/plugin.go deleted file mode 100644 index 601c0ac12..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/plugin.go +++ /dev/null @@ -1,170 +0,0 @@ -// +build experimental - -package types - -import ( - "encoding/json" - "fmt" -) - -// PluginInstallOptions holds parameters to install a plugin.
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) -} - -// PluginConfig represents the values of settings potentially modifiable by a user -type PluginConfig struct { - Mounts []PluginMount - Env []string - Args []string - Devices []PluginDevice -} - -// Plugin represents a Docker plugin for the remote API -type Plugin struct { - ID string `json:"Id,omitempty"` - Name string - Tag string - // Enabled is true when the plugin is running, is false when the plugin is not running, only installed. - Enabled bool - Config PluginConfig - Manifest PluginManifest -} - -// PluginsListResponse contains the response for the remote API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// PluginInterfaceType represents a type that a plugin implements. -type PluginInterfaceType struct { - Prefix string // This is always "docker" - Capability string // Capability should be validated against the above list. - Version string // Plugin API version. Depends on the capability -} - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginInterface describes the interface between Docker and plugin -type PluginInterface struct { - Types []PluginInterfaceType - Socket string -} - -// PluginSetting is to be embedded in other structs, if they are supposed to be -// modifiable by the user. 
-type PluginSetting struct { - Name string - Description string - Settable []string -} - -// PluginNetwork represents the network configuration for a plugin -type PluginNetwork struct { - Type string -} - -// PluginMount represents the mount configuration for a plugin -type PluginMount struct { - PluginSetting - Source *string - Destination string - Type string - Options []string -} - -// PluginEnv represents an environment variable for a plugin -type PluginEnv struct { - PluginSetting - Value *string -} - -// PluginArgs represents the command line arguments for a plugin -type PluginArgs struct { - PluginSetting - Value []string -} - -// PluginDevice represents a device for a plugin -type PluginDevice struct { - PluginSetting - Path *string -} - -// PluginUser represents the user for the plugin's process -type PluginUser struct { - UID uint32 `json:"Uid,omitempty"` - GID uint32 `json:"Gid,omitempty"` -} - -// PluginManifest represents the manifest of a plugin -type PluginManifest struct { - ManifestVersion string - Description string - Documentation string - Interface PluginInterface - Entrypoint []string - Workdir string - User PluginUser `json:",omitempty"` - Network PluginNetwork - Capabilities []string - Mounts []PluginMount - Devices []PluginDevice - Env []PluginEnv - Args PluginArgs -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/hyperhq/hyper-api/types/reference/image_reference.go b/vendor/github.com/hyperhq/hyper-api/types/reference/image_reference.go deleted file mode 100644 index be9cf8ebe..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. -func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. -func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/registry/registry.go b/vendor/github.com/hyperhq/hyper-api/types/registry/registry.go deleted file mode 100644 index d2aca6f02..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/registry/registry.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry - // - // RepositoryInfo Examples: - // { - // "Index" : { - // "Name" : "docker.io", - // "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], - // "Secure" : true, - // "Official" : true, - // }, - // "RemoteName" : "library/debian", - // "LocalName" : "debian", - // "CanonicalName" : "docker.io/debian" - // "Official" : true, - // } - // - // { - // "Index" : { - // "Name" : "127.0.0.1:5000", - // "Mirrors" : [], - // "Secure" : false, - // "Official" : false, - // }, - // "RemoteName" : "user/repo", - // "LocalName" : "127.0.0.1:5000/user/repo", - // "CanonicalName" : "127.0.0.1:5000/user/repo", - // "Official" : false, - // } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection of search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/seccomp.go b/vendor/github.com/hyperhq/hyper-api/types/seccomp.go deleted file mode 100644 index e0305a9e3..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/seccomp.go +++ /dev/null @@ -1,68 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction.
-type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Arch used for additional architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Syscall is used to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/service.go b/vendor/github.com/hyperhq/hyper-api/types/service.go deleted file mode 100644 index 9d3b54496..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/service.go +++ /dev/null @@ -1,61 +0,0 @@ -package types - -import ( - "github.com/hyperhq/hyper-api/types/filters" - "github.com/hyperhq/hyper-api/types/strslice" -) - -const ( - LBProtocolHTTP string = "http" - LBProtocolHTTPS string = "https" - LBProtocolTCP string = "tcp" - LBProtocolHTTPSTERM string = "httpsTerm" - - LBAlgorithmRoundRobin string = "roundrobin" - LBAlgorithmLeastConn string = "leastconn" - LBAlgorithmSource string = "source" -) - -// Service represents the configuration of a service for the remote API -type Service struct { - Name string - Image string - WorkingDir string - ContainerSize string - SSLCert string - NetMode string - StopSignal string - ServicePort int - ContainerPort int - Replicas int - HealthCheckInterval int - HealthCheckFall int - HealthCheckRise int - Algorithm string - Protocol string - Stdin bool - Tty bool - SessionAffinity bool - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - Cmd strslice.StrSlice // Command to run when starting the container - Env []string - Volumes map[string]struct{} // List of volumes (mounts) used for the container - Labels map[string]string - SecurityGroups map[string]struct{} - - IP string - FIP string - Message string - Status string - Containers []string -} - -type ServiceListOptions struct { - Filters filters.Args -} - -type ServiceUpdate struct { - Replicas *int - Image *string - FIP *string -} diff --git 
a/vendor/github.com/hyperhq/hyper-api/types/stats.go b/vendor/github.com/hyperhq/hyper-api/types/stats.go deleted file mode 100644 index b420ebe7f..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and responses to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores all CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits.
- Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores all IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates all network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is the ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON wraps Stats and adds per-network statistics -type StatsJSON struct { - Stats - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/strslice/strslice.go b/vendor/github.com/hyperhq/hyper-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb8..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type.
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/time/duration_convert.go b/vendor/github.com/hyperhq/hyper-api/types/time/duration_convert.go deleted file mode 100644 index 63e1eec19..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number of -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/time/timestamp.go b/vendor/github.com/hyperhq/hyper-api/types/time/timestamp.go deleted file mode 100644 index d3695ba72..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/time/timestamp.go +++ /dev/null @@ -1,124 +0,0 @@ -package time - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse the given string as a golang duration, -// then as RFC3339 time and finally as a Unix timestamp. If -// any of these succeed, it returns a Unix timestamp -// as a string; otherwise it returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration.
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - var parseInLocation bool - - // if the string has a Z, a +, or three dashes, use Parse; otherwise use ParseInLocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset; subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339-like timestamp but the parser failed with an error - } - return value, nil // Unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d" (time.Unix(), int64(time.Nanosecond())). -// If the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// nanoseconds will be used to create a time variable.
For example: -// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// It returns seconds as def (the default seconds) if value == "" -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/types.go b/vendor/github.com/hyperhq/hyper-api/types/types.go deleted file mode 100644 index 34c1898cd..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/types.go +++ /dev/null @@ -1,562 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/go-connections/nat" - "github.com/hyperhq/hyper-api/types/container" - "github.com/hyperhq/hyper-api/types/network" - "github.com/hyperhq/hyper-api/types/registry" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST "/containers/{name:.*}/update" -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login.
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string -} - -// PluginsInfo is a temp struct holding the names of the plugins -// registered with the docker daemon. It is used by the Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields are part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and is returned by the "inspect" command -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string -} - -// ContainerNode stores information about the node that a container -// is running on. It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is the newer struct used for container inspection, carrying MountPoint info -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the two-release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container.
-type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -type Snapshot struct { - ID string - Name string - Volume string - Size int -} - -type SnapshotsListResponse struct { - Snapshots []*Snapshot // Snapshots is the list of snapshots being returned - Warnings []string -} - -type SnapshotCreateRequest struct { - Name string // Name is the requested name of the snapshot - Volume string // Volume is the base volume the snapshot is created from - Force bool -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) - - CreatedAt time.Time -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver-specific options to use when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// VolumesInitializeResponse contains the response for the remote API: -// POST "/volumes/initialize" -type VolumesInitializeResponse struct { - Session string // Session identifies the current upload session. If empty, no session is established - Cookie string // Cookie is the cookie to use when uploading volume data - Uploaders map[string]string // Uploaders holds mappings from volume name to volume upload IDs -} - -type VolumeInitDesc struct { - Name string // Name of the volume to be initialized - Source string // Source of the volume -} - -// VolumesInitializeRequest contains the request for the remote API: -// POST "/volumes/initialize" -type VolumesInitializeRequest struct { - Reload bool // Reload original source set in previous volume initialize operation - Volume []VolumeInitDesc // Volume init description -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents whether the network is internal-only - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network-specific options to use when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -type Rule struct { - // The direction in which the security group rule is applied. The only values - // allowed are "ingress" or "egress". For a compute instance, an ingress - // security group rule is applied to incoming (ingress) traffic for that - // instance. An egress rule is applied to traffic leaving the instance. - Direction string `json:"direction" yaml:"direction"` - - // Must be IPv4 or IPv6, and addresses represented in CIDR must match the - // ingress or egress rules. - EtherType string `json:"-" yaml:",omitempty"` - - // The minimum port number in the range that is matched by the security group - // rule. If the protocol is TCP or UDP, this value must be less than or equal - // to the value of the PortRangeMax attribute. If the protocol is ICMP, this - // value must be an ICMP type. - PortRangeMin int `json:"port_range_min" yaml:"port_range_min"` - - // The maximum port number in the range that is matched by the security group - // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If - // the protocol is ICMP, this value must be an ICMP type. - PortRangeMax int `json:"port_range_max" yaml:"port_range_max"` - - // The protocol that is matched by the security group rule. Valid values are - // "tcp", "udp", "icmp" or an empty string. - Protocol string `json:"protocol" yaml:"protocol"` - - // The remote IP prefix to be associated with this security group rule. You - // can specify either RemoteGroupID or RemoteIPPrefix. This attribute - // matches the specified IP prefix as the source IP address of the IP packet. - RemoteIPPrefix string `json:"remote_ip_prefix" yaml:"remote_ip_prefix"` - - // Optional.
The remote group ID to be associated with this security group - // rule. You can specify either RemoteGroupID or RemoteIPPrefix. - RemoteGroupName string `json:"remote_group_name" yaml:"remote_group_name"` -} - -type SecurityGroup struct { - GroupName string `json:"name" yaml:"name"` - // The human-readable description of the group - Description string `json:"description" yaml:"description"` - // The rules which determine how this security group operates. - Rules []Rule `json:"rules" yaml:"rules"` -} diff --git a/vendor/github.com/hyperhq/hyper-api/types/versions/compare.go b/vendor/github.com/hyperhq/hyper-api/types/versions/compare.go deleted file mode 100644 index 611d4fed6..000000000 --- a/vendor/github.com/hyperhq/hyper-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/github.com/hyperhq/hypercli/AUTHORS b/vendor/github.com/hyperhq/hypercli/AUTHORS deleted file mode 100644 index cfc67e32f..000000000 --- a/vendor/github.com/hyperhq/hypercli/AUTHORS +++ /dev/null @@ -1,1033 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Welch -Abel Muiño -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditya -Adria Casas -Adrian Mouat -Adrien Folie -Ahmed Kamal -Ahmet Alp Balkan -Aidan Hobson Sayers -AJ Bowen -Al Tobey -alambike -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Gaynor -Alex Warhawk -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandr Morozov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Allen Madsen -almoehi -Alvin Richards -amangoel -Amit Bakshi -Amy Lindburg -Anand Patil -AnandkumarPatel -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Kuklewicz -Andrew Macgregor -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Ankush Agarwal -Anthony Baire -Anthony Bishopric -Anton Löfgren -Anton Nikitin -Anton Tiurin -Antonio Murdaca -Antony Messerli -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -bdevloed -Ben Firshman -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -bin liu -Blake Geno -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Kochendorfer -Brian (bex) Exelbierd -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Carl X. Su -Cary -Casey Bisson -Charles Hooper -Charles Lindsay -Charles Merriam -Charlie Lewis -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Chris Alfonso -Chris Armstrong -Chris Khoo -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Wahl -chrismckinnel -Christian Berendt -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Troestler -Christopher Currie -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. Costa -Clayton Coleman -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Colm Hally -Cory Forsyth -cressie176 -Cristian Staretu -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Dave Henderson -David Anderson -David Calavera -David Corking -David Davis -David Gageot -David Gebler -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -decadent -Deng Guangxing -Deni Bertovic -Derek -Derek -Derek McGowan -Deric Crago -Deshi Xiao -Dinesh Subhraveti -DiuDiugirl -Djibril Koné -dkumor -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. 
Krivenok -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Doug Davis -Doug MacEachern -doug tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elias Probst -Elijah Zupancic -eluck -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Windisch -Eric-Olivier Lamey -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Faiz Khan -falmp -Fareed Dudhia -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Filipe Brandenburger -Flavio Castelli -FLGMwt -Florian Weingarten -Francisco Carriedo -Francisco Souza -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Freek Kalter -Félix Baylac-Jacqué -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -Galen Sampson -Gareth Rushgrove -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -Goffert van Gool -golubbe -Gosuke Miyashita -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -Günter Zöchbauer -Hans Rødtang -Harald Albers -Harley Laue -Harry Zhang -He Simei -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -Hu Keping -Hu Tao -Huayi Zhang -Hugo Duncan -Hunter Blanks -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Main -Ian Truslove -Iavael -Igor Dolzhikov -ILYA Khlopotov -imre Fitos -inglesp -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Fraixedes -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carr -James DeFelice -James Harrison Fisher -James Kyle -James Lal -James Mills -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Biel -Jaroslaw Zabiello -jaseg -Jason Divock -Jason Giedymin -Jason Hall -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -Jay -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Lindsay -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jeremy Grosser -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan Arentsen -Jordan Sissel -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -Julian Taylor -Julien Barbier -Julien Bordellier -Julien Dubois -Jun-Ru Chang -Justin Force -Justin Plock -Justin Simonelis -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kamil Domanski -Karan Lyons -kargakis -Karl Grzeszczak -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kimbro Staken -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -krrg -Kyle Conroy -kyu -Lachlan Coote -Lajos Papp -Lakshan Perera -lalyos -Lance Chen -Lance Kinley -Lars Kellogg-Stedman -Lars R. Damerow -Laurie Voss -leeplay -Lei Jitang -Len Weincier -Leszek Kowalski -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang-Chi Hsieh -limsy -Liu Hua -Lloyd Dewolf -Lokesh Mandvekar -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luis Martínez de Bartolomé Izquierdo -lukaspustina -lukemarsden -Lénaïc Huard -Ma Shimiao -Mabin -Madhu Venugopal -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manuel Meurer -Manuel Woelker -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna -Marius Voila -Mark Allen -Mark McGranaghan -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -mattymo -mattyw -mauriyouth -Max Shytikov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Michael A. Smith -Michael Brown -Michael Chiang -Michael Crosby -Michael Gorsuch -Michael Hudson-Doyle -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Jemala -Michal Minar -Michaël Pailloncy -Michiel@unhosted -Miguel Angel Fernández -Mihai Borobocea -Mike Chelen -Mike Dillon -Mike Gaffney -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -Mikhail Sobolev -Mingzhen Feng -Mitch Capper -Mohit Soni -Morgante Pell -Morten Siebuhr -Moysés Borges -Mrunal Patel -mschurenko -Mustafa Akın -Médi-Rémi Hashim -Nan Monnand Deng -Naoki Orii -Natalie Parker -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -NikolaMandic -nikolas -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. 
Tezer -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ole Reifschneider -Olivier Gambier -pandrew -panticz -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Tikhomirov -Pavlos Ratis -Peggy Li -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Ericson -Peter Esbensen -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philipp Weissensteiner -Phillip Alexander -Piergiuliano Bossi -Pierre -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Pradeep Chhetri -Prasanna Gautam -Przemek Hejman -pysqz -Qiang Huang -Quentin Brossard -r0n22 -Rafal Jeczalik -Rafe Colton -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Recursive Madman -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Robert Bachmann -Robert Bittle -Robert Obryk -Roberto G. Hashioka -Robin Speekenbrink -robpc -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Roland Huß -Roland Moriz -Ron Smits -root -Rovanion Luckey -Rudolph Gottesheim -Ryan Anderson -Ryan Aslett -Ryan Detzel -Ryan Fowler -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Reis -Sam Rijs -Sami Wagiaalla -Samuel Andaya -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Cronin -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -SeongJae Park -Seongyeol Lim -Sergey Alekseev -Sergey Evstifeev -Shane Canon -shaunol -Shawn Landden -Shawn Siefkas -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solomon Hykes -Song Gao -Soulou -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srini Brahmaroutu -Steeve Morin -Stefan Praszalowicz -Stephen Crosby -Stephen J Day -Steve Francia -Steve Koch -Steven Burgess -Steven Merrill -Steven Richards -Steven Taylor -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Ted M. 
Young -Tehmasp Chaudhri -Tejesh Mehta -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Hansen -Thomas LEVEIL -Thomas Orozco -Thomas Schroeter -Thomas Sjögren -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Gesellchen -Tobias Schmidt -Tobias Schwab -Todd Lunter -Todd Whiteman -Tom Fotherby -Tom Hulihan -Tom Maaswinkel -Tomas Tomecek -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -Torstein Husebø -tpng -Travis Cline -Travis Thieman -Trent Ogren -Tristan Carel -Tyler Brock -Tzu-Jung Lee -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -vgeta -Victor Coisne -Victor Lyuboslavsky -Victor Marmol -Victor Vieux -Viktor Vojnovski -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xinzi Zhou -Xiuming Chen -xuzhaokui -y00277921 -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -Yasunori Mahata -Yestin Sun -Yihang Ho -Yohei Ueda -Yongzhi Pan -Yuan Sun -Yurii Rashkovskii -Zac Dover -Zach Borboa -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Wei -Zhang Wentao -Zilin Du -zimbatm -Zoltan Tombol -zqh -Álex González -Álvaro Lázaro -尹吉峰 diff --git a/vendor/github.com/hyperhq/hypercli/LICENSE b/vendor/github.com/hyperhq/hypercli/LICENSE deleted file mode 100644 index 8f3fee627..000000000 --- a/vendor/github.com/hyperhq/hypercli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/hyperhq/hypercli/NOTICE b/vendor/github.com/hyperhq/hypercli/NOTICE deleted file mode 100644 index 942485dfd..000000000 --- a/vendor/github.com/hyperhq/hypercli/NOTICE +++ /dev/null @@ -1,8 +0,0 @@ -Hyper -Copyright 2015-2016 HyperHQ, Inc. - -This product contains software developed by Docker(https://www.docker.com), licensed -under the Apache2 License. - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. diff --git a/vendor/github.com/hyperhq/hypercli/cliconfig/config.go b/vendor/github.com/hyperhq/hypercli/cliconfig/config.go deleted file mode 100644 index ff99d1e1b..000000000 --- a/vendor/github.com/hyperhq/hypercli/cliconfig/config.go +++ /dev/null @@ -1,292 +0,0 @@ -package cliconfig - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/hyperhq/hyper-api/types" - "github.com/hyperhq/hypercli/pkg/homedir" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - oldConfigfile = ".dockercfg" - - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" - - DefaultHyperFormat = "tcp://*.hyper.sh:443" - DefaultHyperRegion = "us-west-1" - DefaultHyperEndpoint = "hyper.sh:443" -) - -var ( - configDir = os.Getenv("HYPER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), ".hyper") - } -} - -// ConfigDir returns the directory the configuration file is stored in -func ConfigDir() string { - return configDir -} - -// SetConfigDir sets the directory the configuration file is stored in -func SetConfigDir(dir string) { - configDir = dir -} - -type CloudConfig struct { - AccessKey string `json:"accesskey"` - SecretKey string `json:"secretkey"` - Region string `json:"region"` -} - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - CloudConfig map[string]CloudConfig `json:"clouds"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - filename string // Note: not serialized - for internal use only -} - -// NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *ConfigFile { - return &ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - CloudConfig: make(map[string]CloudConfig), - HTTPHeaders: make(map[string]string), - filename: fn, - } -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return fmt.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - origEmail := strings.Split(arr[1], " = ") - if len(origEmail) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Email = origEmail[1] - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile 
object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*ConfigFile, error) { - configFile := ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*ConfigFile, error) { - configFile := ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - CloudConfig: make(map[string]CloudConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and return values. -// FIXME: use the internal golang config parser -func Load(configDir string) (*ConfigFile, error) { - if configDir == "" { - configDir = ConfigDir() - } - - configFile := ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - CloudConfig: make(map[string]CloudConfig), - filename: filepath.Join(configDir, ConfigFileName), - } - - // Try happy path first - latest config file - if _, err := os.Stat(configFile.filename); err == nil { - file, err := os.Open(configFile.filename) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = fmt.Errorf("%s - %v", configFile.filename, err) - } - return &configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return &configFile, fmt.Errorf("%s - %v", configFile.filename, err) - } - - // Can't find latest config file so check for the old one - confFile := filepath.Join(homedir.Get(), oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - - if configFile.HTTPHeaders == nil { - configFile.HTTPHeaders = map[string]string{} - } - return &configFile, nil -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename() == "" { - return fmt.Errorf("Can't save config with empty filename") - } - - if err := os.MkdirAll(filepath.Dir(configFile.filename), 0700); err != nil { - return err - } - f, err 
:= os.OpenFile(configFile.filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// Filename returns the name of the configuration file -func (configFile *ConfigFile) Filename() string { - return configFile.filename -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/contrib/docker-engine-selinux/LICENSE b/vendor/github.com/hyperhq/hypercli/contrib/docker-engine-selinux/LICENSE deleted file mode 100644 index 5b6e7c66c..000000000 --- a/vendor/github.com/hyperhq/hypercli/contrib/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. 
- - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - <signature of Ty Coon>, 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. diff --git a/vendor/github.com/hyperhq/hypercli/contrib/syntax/vim/LICENSE b/vendor/github.com/hyperhq/hypercli/contrib/syntax/vim/LICENSE deleted file mode 100644 index e67cdabd2..000000000 --- a/vendor/github.com/hyperhq/hypercli/contrib/syntax/vim/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 Honza Pokorny -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver.go deleted file mode 100644 index 74435bf4f..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver.go +++ /dev/null @@ -1,218 +0,0 @@ -package graphdriver - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/idtools" -) - -// FsMagic unsigned id of the filesystem in use. -type FsMagic uint32 - -const ( - // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. - FsMagicUnsupported = FsMagic(0x00000000) -) - -var ( - // All registered drivers - drivers map[string]InitFunc - - // ErrNotSupported returned when driver is not supported. - ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites retuned when driver does not meet prerequisites. - ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") - // ErrIncompatibleFS returned when file system is not supported. - ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") -) - -// InitFunc initializes the storage driver. -type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) - -// ProtoDriver defines the basic capabilities of a driver. -// This interface exists solely to be a minimum set of methods -// for client code which choose not to implement the entire Driver -// interface and use the NaiveDiffDriver wrapper constructor. -// -// Use of ProtoDriver directly by client code is not recommended. -type ProtoDriver interface { - // String returns a string representation of this driver. - String() string - // Create creates a new, empty, filesystem layer with the - // specified id and parent and mountLabel. Parent and mountLabel may be "". - Create(id, parent, mountLabel string) error - // Remove attempts to remove the filesystem layer with this id. - Remove(id string) error - // Get returns the mountpoint for the layered filesystem referred - // to by this id. You can optionally specify a mountLabel or "". - // Returns the absolute path to the mounted layered filesystem. - Get(id, mountLabel string) (dir string, err error) - // Put releases the system resources for the specified id, - // e.g, unmounting layered filesystem. - Put(id string) error - // Exists returns whether a filesystem layer with the specified - // ID exists on this driver. - Exists(id string) bool - // Status returns a set of key-value pairs which give low - // level diagnostic status about this driver. - Status() [][2]string - // Returns a set of key-value pairs which give low level information - // about the image/container driver is managing. 
- GetMetadata(id string) (map[string]string, error) - // Cleanup performs necessary tasks to release resources - // held by the driver, e.g., unmounting all layered filesystems - // known to this driver. - Cleanup() error -} - -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver - // Diff produces an archive of the changes between the specified - // layer and its parent layer which may be "". - Diff(id, parent string) (archive.Archive, error) - // Changes produces a list of changes between the specified layer - // and its parent layer. If parent is "", then all changes will be ADD changes. - Changes(id, parent string) ([]archive.Change, error) - // ApplyDiff extracts the changeset from the given diff into the - // layer with the specified id and parent, returning the size of the - // new layer in bytes. - // The archive.Reader must be an uncompressed stream. - ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) - // DiffSize calculates the changes between the specified id - // and its parent and returns the size in bytes of the changes - // relative to its base filesystem directory. - DiffSize(id, parent string) (size int64, err error) -} - -func init() { - drivers = make(map[string]InitFunc) -} - -// Register registers a InitFunc for the driver. -func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -// GetDriver initializes and returns the registered driver -func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - if pluginDriver, err := lookupPlugin(name, home, options); err == nil { - return pluginDriver, nil - } - logrus.Errorf("Failed to GetDriver graph %s %s", name, home) - return nil, ErrNotSupported -} - -// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins -func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, ErrNotSupported -} - -// New creates the driver and initializes it at the specified root. -func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) { - if name != "" { - logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver - return GetDriver(name, root, options, uidMaps, gidMaps) - } - - // Guess for prior driver - priorDrivers := scanPriorDrivers(root) - for _, name := range priority { - if name == "vfs" { - // don't use vfs even if there is state present. - continue - } - for _, prior := range priorDrivers { - // of the state found from prior drivers, check in order of our priority - // which we would prefer - if prior == name { - driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) - if err != nil { - // unlike below, we will return error here, because there is prior - // state, and now it is no longer supported/prereq/compatible, so - // something changed and needs attention. Otherwise the daemon's - // images would just "disappear". 
- logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) - return nil, err - } - if err := checkPriorDriver(name, root); err != nil { - return nil, err - } - logrus.Infof("[graphdriver] using prior storage driver %q", name) - return driver, nil - } - } - } - - // Check for priority drivers first - for _, name := range priority { - driver, err = getBuiltinDriver(name, root, options, uidMaps, gidMaps) - if err != nil { - if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { - continue - } - return nil, err - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for _, initFunc := range drivers { - if driver, err = initFunc(root, options, uidMaps, gidMaps); err != nil { - if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { - continue - } - return nil, err - } - return driver, nil - } - return nil, fmt.Errorf("No supported storage backend found") -} - -// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) []string { - priorDrivers := []string{} - for driver := range drivers { - p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { - priorDrivers = append(priorDrivers, driver) - } - } - return priorDrivers -} - -func checkPriorDriver(name, root string) error { - priorDrivers := []string{} - for _, prior := range scanPriorDrivers(root) { - if prior != name && prior != "vfs" { - if _, err := os.Stat(filepath.Join(root, prior)); err == nil { - priorDrivers = append(priorDrivers, prior) - } - } - } - - if len(priorDrivers) > 0 { - - return fmt.Errorf("%q contains other graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(priorDrivers, ",")) - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_freebsd.go deleted file mode 100644 index be4eb5265..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_freebsd.go +++ /dev/null @@ -1,8 +0,0 @@ -package graphdriver - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "zfs", - } -) diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_linux.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_linux.go deleted file mode 100644 index e64ab1bfa..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_linux.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -package graphdriver - -import ( - "path/filepath" - "syscall" -) - -const ( - // FsMagicAufs filesystem id for Aufs - FsMagicAufs = FsMagic(0x61756673) - // FsMagicBtrfs filesystem id for Btrfs - FsMagicBtrfs = FsMagic(0x9123683E) - // FsMagicCramfs filesystem id for Cramfs - FsMagicCramfs = FsMagic(0x28cd3d45) - // FsMagicExtfs filesystem id for Extfs - FsMagicExtfs = FsMagic(0x0000EF53) - // FsMagicF2fs filesystem id for F2fs - FsMagicF2fs = FsMagic(0xF2F52010) - // FsMagicGPFS filesystem id for GPFS - FsMagicGPFS = FsMagic(0x47504653) - // FsMagicJffs2Fs filesystem if for Jffs2Fs - FsMagicJffs2Fs = FsMagic(0x000072b6) - // FsMagicJfs filesystem id for Jfs - FsMagicJfs = FsMagic(0x3153464a) - // FsMagicNfsFs filesystem id for NfsFs - FsMagicNfsFs = FsMagic(0x00006969) - // FsMagicRAMFs filesystem id for RamFs - FsMagicRAMFs = FsMagic(0x858458f6) - // FsMagicReiserFs filesystem id for ReiserFs - FsMagicReiserFs 
= FsMagic(0x52654973) - // FsMagicSmbFs filesystem id for SmbFs - FsMagicSmbFs = FsMagic(0x0000517B) - // FsMagicSquashFs filesystem id for SquashFs - FsMagicSquashFs = FsMagic(0x73717368) - // FsMagicTmpFs filesystem id for TmpFs - FsMagicTmpFs = FsMagic(0x01021994) - // FsMagicVxFS filesystem id for VxFs - FsMagicVxFS = FsMagic(0xa501fcf5) - // FsMagicXfs filesystem id for Xfs - FsMagicXfs = FsMagic(0x58465342) - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) -) - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "aufs", - "btrfs", - "zfs", - "devicemapper", - "overlay", - "vfs", - } - - // FsNames maps filesystem id to name of the filesystem. - FsNames = map[FsMagic]string{ - FsMagicAufs: "aufs", - FsMagicBtrfs: "btrfs", - FsMagicCramfs: "cramfs", - FsMagicExtfs: "extfs", - FsMagicF2fs: "f2fs", - FsMagicGPFS: "gpfs", - FsMagicJffs2Fs: "jffs2", - FsMagicJfs: "jfs", - FsMagicNfsFs: "nfs", - FsMagicRAMFs: "ramfs", - FsMagicReiserFs: "reiserfs", - FsMagicSmbFs: "smb", - FsMagicSquashFs: "squashfs", - FsMagicTmpFs: "tmpfs", - FsMagicUnsupported: "unsupported", - FsMagicVxFS: "vxfs", - FsMagicXfs: "xfs", - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { - return 0, err - } - return FsMagic(buf.Type), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_unsupported.go deleted file mode 100644 index b3f685730..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux,!windows,!freebsd - -package graphdriver - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_windows.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_windows.go deleted file mode 100644 index 6c09affae..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/driver_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package graphdriver - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - "windowsdiff", - "vfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - // Note it is OK to return FsMagicUnsupported on Windows. - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/fsdiff.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/fsdiff.go deleted file mode 100644 index 3d776b33a..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/fsdiff.go +++ /dev/null @@ -1,162 +0,0 @@ -package graphdriver - -import ( - "time" - - "github.com/Sirupsen/logrus" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/chrootarchive" - "github.com/hyperhq/hypercli/pkg/idtools" - "github.com/hyperhq/hypercli/pkg/ioutils" -) - -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. 
- ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) - -// NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods which it may or may not -// support on its own. See the comment on the exported -// NewNaiveDiffDriver function below. -// Notably, the AUFS driver doesn't need to be wrapped like this. -type NaiveDiffDriver struct { - ProtoDriver - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// NewNaiveDiffDriver returns a fully functional driver that wraps the -// given ProtoDriver and adds the capability of the following methods which -// it may or may not support on its own: -// Diff(id, parent string) (archive.Archive, error) -// Changes(id, parent string) ([]archive.Change, error) -// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) -// DiffSize(id, parent string) (size int64, err error) -func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - driver.Put(id) - } - }() - - if parent == "" { - archive, err := archive.Tar(layerFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil - } - - parentFs, err := driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - - changes, err := archive.ChangesDirs(layerFs, parentFs) - if err != nil { - return nil, err - } - - archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - defer driver.Put(id) - - parentFs := "" - - if parent != "" { - parentFs, err = driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - } - - return archive.ChangesDirs(layerFs, parentFs) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - driver := gdw.ProtoDriver - - // Mount the root filesystem so we can apply the diff/layer. 
- layerFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - options := &archive.TarOptions{UIDMaps: gdw.uidMaps, - GIDMaps: gdw.gidMaps} - start := time.Now().UTC() - logrus.Debugf("Start untar layer") - if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { - return - } - logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { - driver := gdw.ProtoDriver - - changes, err := gdw.Changes(id, parent) - if err != nil { - return - } - - layerFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - return archive.ChangesSize(layerFs, changes), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin.go deleted file mode 100644 index db12a6b5d..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build experimental - -package graphdriver - -import ( - "fmt" - "io" - - "github.com/hyperhq/hypercli/pkg/plugins" -) - -type pluginClient interface { - // Call calls the specified method with the specified arguments for the plugin. - Call(string, interface{}, interface{}) error - // Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream - Stream(string, interface{}) (io.ReadCloser, error) - // SendFile calls the specified method, and passes through the IO stream - SendFile(string, io.Reader, interface{}) error -} - -func lookupPlugin(name, home string, opts []string) (Driver, error) { - pl, err := plugins.Get(name, "GraphDriver") - if err != nil { - return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) - } - return newPluginDriver(name, home, opts, pl.Client) -} - -func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) { - proxy := &graphDriverProxy{name, c} - return proxy, proxy.Init(home, opts) -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin_unsupported.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin_unsupported.go deleted file mode 100644 index daa7a170e..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/plugin_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !experimental - -package graphdriver - -func lookupPlugin(name, home string, opts []string) (Driver, error) { - return nil, ErrNotSupported -} diff --git a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/proxy.go b/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/proxy.go deleted file mode 100644 index 79dc8c06d..000000000 --- a/vendor/github.com/hyperhq/hypercli/daemon/graphdriver/proxy.go +++ /dev/null @@ -1,210 +0,0 @@ -// +build experimental - -package graphdriver - -import ( - "errors" - "fmt" - - "github.com/hyperhq/hypercli/pkg/archive" -) - -type graphDriverProxy struct { - name string - client pluginClient -} - -type graphDriverRequest struct { - ID string `json:",omitempty"` - Parent string `json:",omitempty"` - MountLabel string `json:",omitempty"` -} - -type graphDriverResponse struct { - Err string `json:",omitempty"` - Dir string `json:",omitempty"` - Exists bool `json:",omitempty"` - Status [][2]string 
`json:",omitempty"` - Changes []archive.Change `json:",omitempty"` - Size int64 `json:",omitempty"` - Metadata map[string]string `json:",omitempty"` -} - -type graphDriverInitRequest struct { - Home string - Opts []string -} - -func (d *graphDriverProxy) Init(home string, opts []string) error { - args := &graphDriverInitRequest{ - Home: home, - Opts: opts, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) String() string { - return d.name -} - -func (d *graphDriverProxy) Create(id, parent, mountLabel string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Remove(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { - args := &graphDriverRequest{ - ID: id, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { - return "", err - } - var err error - if ret.Err != "" { - err = errors.New(ret.Err) - } - return ret.Dir, err -} - -func (d *graphDriverProxy) Put(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Exists(id string) bool { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { - return false - } - return ret.Exists -} - -func (d *graphDriverProxy) Status() [][2]string { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { - return nil - } - return ret.Status -} - -func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { - args := &graphDriverRequest{ - ID: id, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - return ret.Metadata, nil -} - -func (d *graphDriverProxy) Cleanup() error { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { - return nil - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - body, err := d.client.Stream("GraphDriver.Diff", args) - if err != nil { - body.Close() - return nil, err - } - return archive.Archive(body), nil -} - -func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { - return nil, err 
- } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - - return ret.Changes, nil -} - -func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { - var ret graphDriverResponse - if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} - -func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/docs/static_files/contributors.png b/vendor/github.com/hyperhq/hypercli/docs/static_files/contributors.png deleted file mode 100644 index 63c0a0c09b58bce2e1ade867760a937612934202..0000000000000000000000000000000000000000 GIT binary patch [binary payload omitted: contributors.png, 23100 bytes, file deleted]
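The NaiveDiffDriver.Diff method removed above hands callers an archive whose Close both closes the tar stream and releases the layer reference via driver.Put. That is the read-closer-wrapper pattern from the vendored ioutils package; the following is a minimal, self-contained sketch of the same idea, with readCloserWrapper and newReadCloserWrapper as illustrative stand-ins rather than the vendored API:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper pairs a Reader with a custom close function so that
// cleanup (releasing a layer reference, unmounting, etc.) runs only once
// the consumer has finished reading the stream.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	released := false
	rc := newReadCloserWrapper(strings.NewReader("layer data"), func() error {
		released = true // stands in for driver.Put(id)
		return nil
	})
	data, _ := io.ReadAll(rc)
	rc.Close()
	fmt.Println(string(data), released) // layer data true
}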
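The graphDriverProxy methods above all follow one RPC convention: transport failures surface as the error returned by pluginClient.Call, while driver-level failures travel inside the response as a plain Err string that the proxy turns back into a Go error. A hedged sketch of that convention under a fake transport (mockClient and driverResponse are invented for illustration, not the vendored types):

package main

import (
	"errors"
	"fmt"
)

type driverResponse struct {
	Err string `json:",omitempty"`
}

// mockClient stands in for the plugin transport; a real client would do a
// JSON round trip over the plugin socket.
type mockClient struct{}

func (mockClient) Call(method string, args, ret interface{}) error {
	// Pretend the plugin answered this method with a driver-level failure.
	if resp, ok := ret.(*driverResponse); ok {
		resp.Err = "layer is in use"
	}
	return nil // the transport itself succeeded
}

// remove mirrors the proxy shape: check the transport error first, then
// rehydrate the response's Err field into a Go error.
func remove(c mockClient, id string) error {
	var ret driverResponse
	if err := c.Call("GraphDriver.Remove", struct{ ID string }{id}, &ret); err != nil {
		return err
	}
	if ret.Err != "" {
		return errors.New(ret.Err)
	}
	return nil
}

func main() {
	fmt.Println(remove(mockClient{}, "abc123")) // layer is in use
}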
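Further below, the image/fs.go store persists content by writing to a sibling .tmp file and renaming it over the final path, which keeps readers from ever observing a half-written content file. A short sketch of that write-then-rename pattern (atomicWrite is an illustrative helper, and the rename is only atomic on POSIX filesystems when source and target live on the same volume):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWrite writes data to path via a temporary sibling file, mirroring
// the Set/SetMetadata pattern in the removed image/fs.go.
func atomicWrite(path string, data []byte) error {
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, data, 0o600); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}

func main() {
	dir, err := os.MkdirTemp("", "store-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "content")
	if err := atomicWrite(target, []byte("image config")); err != nil {
		panic(err)
	}
	b, _ := os.ReadFile(target)
	fmt.Println(string(b)) // image config
}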
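The layer/empty.go file removed below pins DigestSHA256EmptyTar to the digest of an empty tar stream, which is exactly the two 512-byte zero blocks a tar writer emits as its trailer. The constant can be reproduced with the standard library alone; this sketch should print 1024 and the same digest:

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Closing a tar.Writer with no entries writes only the end-of-archive
	// trailer: two 512-byte blocks of NUL bytes.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	tw.Close()

	fmt.Println(buf.Len()) // 1024
	fmt.Printf("sha256:%x\n", sha256.Sum256(buf.Bytes()))
	// sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef
}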
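Finally, layer/layer.go below derives a chain's ChainID by folding over the layer digests: the first DiffID seeds the chain, and each step hashes the parent chain ID, a space, and the next DiffID (the H = "H(n-1) SHA256(n)" recurrence in createChainIDFromParent). A sketch of that fold using hypothetical placeholder digests as input:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID folds a slice of diff IDs into a chain ID the same way
// createChainIDFromParent does:
// ChainID(n) = SHA256(ChainID(n-1) + " " + DiffID(n)).
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		id = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(id+" "+d)))
	}
	return id
}

func main() {
	// Placeholder diff IDs, not real layer digests.
	fmt.Println(chainID([]string{"sha256:aaaa", "sha256:bbbb"}))
}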
zz9bYYa>~YTl53*t1N>AsDVm3z&Cf%uL*)UJ3fV;CQe~*jfNTBn4Phx3xCO_gz?^Of zmIuLuqoS>lU{!8d>kcOk5V~sDurFtf&hBbcM0Z8apPYGtyPP_dRc3h;B%YynF@RAz zTB>}f-`xhR#eNT7SP{HW^g;gQ2Y0^Y(0e8U-_cK4Rb`y3R|OSZ@p986A1O|g`UM0o z?jfe#c_A>u4b3()*LWHWW-*Ud^{n?#9~&^g*tm2Ek^n)vJDp5N@nF< z>2L?b4R@?vtbe@@Wrh)NyR!%3qGx#}0UBxx;>_qy{_~t)_Rpg0K9Jy?K~# zT3?=yiHVujuA}-*D5+k&7l@7o^S7QptbGk$X@`-S5i(cXEo;s|(GSbwM@knTDZWkr zZh$*Ui|pd#=O zE|@5YMVWRJYsFUH6vpSd%GmKf3c1v%-R&P__1&R2s@^F2CRpqRU@~G~v0H+MK3y6hTq3hWN>s2!-|q%| zU$!1=64}({#O2x&XY1~ti>rowsSFZd?$AkH^rt4|xBMNvg!`dT@*%RV*d6_biqQls zxvO~VZym74+@nY*Ng6(hCWjjzO)u~`5K2%lN>GoD!U}GA`{nY3H_X4U`+kPcYn>s& z?nmApS0bPl3XU*Xp?WOXYn74gACB$QMV8ZM3rCCS86I`EXdC zU9QycN3MwLTbXKF1GlRDAA^U-O+_^Fl`m9HEr>aVh^#TWYxan~t_?NtcN(zC9fQtz zFS{H}hlMDU9~#Sz?hH8$6nQlEq!UHPi_n*=*>rtn+0U_0=7iPkD%<^ti5*+xhS%Au zy&dZ%4Q;ML^p<(Lm7L@!a(OJnA_Ileo`F25&A~}wbrrET>#0__=XDjV89f8hKFV(d zVYbEsy#GwXmr|1khD!t<#_gVVhiqT}n{=#MtJ?|zYUrCze|>Jm4g_y@Z`atlT22}d zOrj9ONF{oY3etI2BmshHgN1CcpoWb15_YJx?-~H%PVV3Gjtt&`QGK;=5)i$d)yomt z8amgd{-W@)ZCiHK(2yjR7;Zn;kJY>`2!?off#A!TS<7Pnuc3Sz@M70oB5G?_RE?;a*~61G(Z;T@m*MdZH$snsAp4L{~4&3w<;PFiA7crzYB19cr z@;Y8_r+lcnl_lw}`D77jNudxik=@Nif=+DGslF_}ZE8F*J1zXj{b50k@ZkoQqxKM) zYFyfFuQ{J{i7aZIle?xoptdFl*cw<;{`HM6YTZ3;^GUa7;X}Vk3_hMf<2?xi@G=B< zIE}uu_#>bb%{N(GS^Ybl)@ek#Qg69!fFW58OfqCx)YI$2q=fPln?uhm5>V~El3be3 zF^{@nNS-hra8`^pQb487yra7;r5JG`%)xV@BnDVy1x$Z;G_JhI_gEI_G=GCkm1WUC zw4wXmS5X%bmU&OeSb<>^;7@^cQY#W`nF#I$e=AqN#y*yVH$E4n(v)X~^HM;^T?i11 zqdaje=HB2BP&+?O-fh>c< zc*UoO^hPodZ@zNStNQ*66Y?Y^HX`~`qHa5h7_^LhGSQP#@!F zO|Nh2GxrP5M@|}KsDn|rww{nCVaXpJ{?Mcq8Kbc1`jSnl<;6R37R8f%Dh%VMk(K%< zF9kh^yb+)9L@cVsFIU&4N7NQ1Az2)#LgD+_ z;F6z0MW8{6xN4(r513cm`pFEXYUT6~io>~nTTfx6B}6h%T-k~JdarMdSmb4OVHIx z?_;)Mc)dNOujjiEO12~_ARFtj)4IDEo%3}psBt97ooyYA_v`JKA;0(4FE87_9vMl0 z51PI#TJL*HH1oR#Y?INdDP<6h0m+f2H<4bXyh{e<`t%87Ng1mfZyj@Y6h59TRM1wu zbFjOaJt}a`WfmQ;eAdVxz*;+ce z7p)tTw3YS&(`0OJe~^A=p+~XuO{P8V{wYn4q>R*Q4D~)Q>e$U?-!gRVHAC-X)>N3T z*XZywk~H@k&&>YF^6|mq!`zF1knO@A^S*NA$x_{X<`Td13QKR1txaA_Kau}E-N;UF z}_Nd@KmPd)J@GAAJyW1HH|?$Muvj@6BexRYo5)cd~F`_-spS9-Q)8GVMN1)L*T=voe>uY0Uw~F(xk?{pI&ZfN1;Q6M*|@;*?k38mE43V=eHCWpfwx_C#r znAllw?Q*~E6z81p{_Bque~HKLaDi1Q*3|XZtP#Ea-Ut0w_7PG`VGJklvgd1dIu%@Ub#a?BJX7Q2*Nx2x$mJ1*^ja}|1R)=J4Vtiv8$kotL2sflbO>$xT?hsO zuY+Alx)(6`Z%|>0ptQfJN+lq2Q~l089zy4B)UX4LKEKP>ou21e(zDC+-0;~l@Bh{E j`2XOe{3aXi30VC`rN0&MDDi9u{X#`SL%vehEckx_MeJN* diff --git a/vendor/github.com/hyperhq/hypercli/hack/generate-authors.sh b/vendor/github.com/hyperhq/hypercli/hack/generate-authors.sh deleted file mode 100755 index e78a97f96..000000000 --- a/vendor/github.com/hyperhq/hypercli/hack/generate-authors.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." - -# see also ".mailmap" for how email addresses and names are deduplicated - -{ - cat <<-'EOH' - # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. 
- EOH - echo - git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS diff --git a/vendor/github.com/hyperhq/hypercli/image/fs.go b/vendor/github.com/hyperhq/hypercli/image/fs.go deleted file mode 100644 index 72c9ab424..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/fs.go +++ /dev/null @@ -1,184 +0,0 @@ -package image - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" -) - -// IDWalkFunc is function called by StoreBackend.Walk -type IDWalkFunc func(id ID) error - -// StoreBackend provides interface for image.Store persistence -type StoreBackend interface { - Walk(f IDWalkFunc) error - Get(id ID) ([]byte, error) - Set(data []byte) (ID, error) - Delete(id ID) error - SetMetadata(id ID, key string, data []byte) error - GetMetadata(id ID, key string) ([]byte, error) - DeleteMetadata(id ID, key string) error -} - -// fs implements StoreBackend using the filesystem. -type fs struct { - sync.RWMutex - root string -} - -const ( - contentDirName = "content" - metadataDirName = "metadata" -) - -// NewFSStoreBackend returns new filesystem based backend for image.Store -func NewFSStoreBackend(root string) (StoreBackend, error) { - return newFSStore(root) -} - -func newFSStore(root string) (*fs, error) { - s := &fs{ - root: root, - } - if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - return s, nil -} - -func (s *fs) contentFile(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -func (s *fs) metadataDir(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -// Walk calls the supplied callback for each image ID in the storage backend. -func (s *fs) Walk(f IDWalkFunc) error { - // Only Canonical digest (sha256) is currently supported - s.RLock() - dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) - s.RUnlock() - if err != nil { - return err - } - for _, v := range dir { - dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) - continue - } - if err := f(ID(dgst)); err != nil { - return err - } - } - return nil -} - -// Get returns the content stored under a given ID. -func (s *fs) Get(id ID) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - return s.get(id) -} - -func (s *fs) get(id ID) ([]byte, error) { - content, err := ioutil.ReadFile(s.contentFile(id)) - if err != nil { - return nil, err - } - - // todo: maybe optional - if ID(digest.FromBytes(content)) != id { - return nil, fmt.Errorf("failed to verify image: %v", id) - } - - return content, nil -} - -// Set stores content under a given ID. 
-func (s *fs) Set(data []byte) (ID, error) { - s.Lock() - defer s.Unlock() - - if len(data) == 0 { - return "", fmt.Errorf("Invalid empty data") - } - - id := ID(digest.FromBytes(data)) - filePath := s.contentFile(id) - tempFilePath := s.contentFile(id) + ".tmp" - if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { - return "", err - } - if err := os.Rename(tempFilePath, filePath); err != nil { - return "", err - } - - return id, nil -} - -// Delete removes content and metadata files associated with the ID. -func (s *fs) Delete(id ID) error { - s.Lock() - defer s.Unlock() - - if err := os.RemoveAll(s.metadataDir(id)); err != nil { - return err - } - if err := os.Remove(s.contentFile(id)); err != nil { - return err - } - return nil -} - -// SetMetadata sets metadata for a given ID. It fails if there's no base file. -func (s *fs) SetMetadata(id ID, key string, data []byte) error { - s.Lock() - defer s.Unlock() - if _, err := s.get(id); err != nil { - return err - } - - baseDir := filepath.Join(s.metadataDir(id)) - if err := os.MkdirAll(baseDir, 0700); err != nil { - return err - } - filePath := filepath.Join(s.metadataDir(id), key) - tempFilePath := filePath + ".tmp" - if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil { - return err - } - return os.Rename(tempFilePath, filePath) -} - -// GetMetadata returns metadata for a given ID. -func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - if _, err := s.get(id); err != nil { - return nil, err - } - return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) -} - -// DeleteMetadata removes the metadata associated with an ID. -func (s *fs) DeleteMetadata(id ID, key string) error { - s.Lock() - defer s.Unlock() - - return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) -} diff --git a/vendor/github.com/hyperhq/hypercli/image/image.go b/vendor/github.com/hyperhq/hypercli/image/image.go deleted file mode 100644 index 0de9f3acb..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/image.go +++ /dev/null @@ -1,138 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "io" - "time" - - "github.com/docker/distribution/digest" - "github.com/hyperhq/hyper-api/types/container" -) - -// ID is the content-addressable ID of an image. -type ID digest.Digest - -func (id ID) String() string { - return digest.Digest(id).String() -} - -// V1Image stores the V1 image configuration. 
-type V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent id of the image - Parent string `json:"parent,omitempty"` - // Comment user added comment - Comment string `json:"comment,omitempty"` - // Created timestamp when image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig container.Config `json:"container_config,omitempty"` - // DockerVersion specifies version on which image is built - DockerVersion string `json:"docker_version,omitempty"` - // Author of the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *container.Config `json:"config,omitempty"` - // Architecture is the hardware that the image is built and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Image stores the image configuration -type Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. - computedID ID -} - -// RawJSON returns the immutable JSON associated with the image. -func (img *Image) RawJSON() []byte { - return img.rawJSON -} - -// ID returns the image's content-addressable ID. -func (img *Image) ID() ID { - return img.computedID -} - -// ImageID stringizes ID. -func (img *Image) ImageID() string { - return string(img.ID()) -} - -// RunConfig returns the image's container config. -func (img *Image) RunConfig() *container.Config { - return img.Config -} - -// MarshalJSON serializes the image to JSON. It sorts the top-level keys so -// that JSON that's been manipulated by a push/pull cycle with a legacy -// registry won't end up with a different key order. -func (img *Image) MarshalJSON() ([]byte, error) { - type MarshalImage Image - - pass1, err := json.Marshal(MarshalImage(*img)) - if err != nil { - return nil, err - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(pass1, &c); err != nil { - return nil, err - } - return json.Marshal(c) -} - -// History stores build commands that were used to create an image -type History struct { - // Created timestamp for build point - Created time.Time `json:"created"` - // Author of the build point - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building image. - CreatedBy string `json:"created_by,omitempty"` - // Comment is custom message set by the user when creating the image. - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section.
- EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Exporter provides interface for exporting and importing images -type Exporter interface { - Load(io.ReadCloser, io.Writer) error - // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error - Save([]string, io.Writer) error -} - -// NewFromJSON creates an Image configuration from json. -func NewFromJSON(src []byte) (*Image, error) { - img := &Image{} - - if err := json.Unmarshal(src, img); err != nil { - return nil, err - } - if img.RootFS == nil { - return nil, errors.New("Invalid image JSON, no RootFS key.") - } - - img.rawJSON = src - - return img, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/image/rootfs.go b/vendor/github.com/hyperhq/hypercli/image/rootfs.go deleted file mode 100644 index 7ddbb851b..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/rootfs.go +++ /dev/null @@ -1,8 +0,0 @@ -package image - -import "github.com/hyperhq/hypercli/layer" - -// Append appends a new diffID to rootfs -func (r *RootFS) Append(id layer.DiffID) { - r.DiffIDs = append(r.DiffIDs, id) -} diff --git a/vendor/github.com/hyperhq/hypercli/image/rootfs_unix.go b/vendor/github.com/hyperhq/hypercli/image/rootfs_unix.go deleted file mode 100644 index 4b50058ee..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/rootfs_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !windows - -package image - -import "github.com/hyperhq/hypercli/layer" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into a interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - return layer.CreateChainID(r.DiffIDs) -} - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: "layers"} -} diff --git a/vendor/github.com/hyperhq/hypercli/image/rootfs_windows.go b/vendor/github.com/hyperhq/hypercli/image/rootfs_windows.go deleted file mode 100644 index d88bce8ce..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/rootfs_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package image - -import ( - "crypto/sha512" - "fmt" - - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/layer" -) - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into a interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - -// BaseLayerID returns the 64 byte hex ID for the baselayer name. -func (r *RootFS) BaseLayerID() string { - baseID := sha512.Sum384([]byte(r.BaseLayer)) - return fmt.Sprintf("%x", baseID[:32]) -} - -// ChainID returns the ChainID for the top layer in RootFS. 
-func (r *RootFS) ChainID() layer.ChainID { - baseDiffID := digest.FromBytes([]byte(r.BaseLayerID())) - return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...)) -} - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: "layers+base"} -} diff --git a/vendor/github.com/hyperhq/hypercli/image/store.go b/vendor/github.com/hyperhq/hypercli/image/store.go deleted file mode 100644 index 9fbac3734..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/store.go +++ /dev/null @@ -1,289 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "fmt" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/layer" -) - -// Store is an interface for creating and accessing images -type Store interface { - Create(config []byte) (ID, error) - Get(id ID) (*Image, error) - Delete(id ID) ([]layer.Metadata, error) - Search(partialID string) (ID, error) - SetParent(id ID, parent ID) error - GetParent(id ID) (ID, error) - Children(id ID) []ID - Map() map[ID]*Image - Heads() map[ID]*Image -} - -// LayerGetReleaser is a minimal interface for getting and releasing images. -type LayerGetReleaser interface { - Get(layer.ChainID) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type imageMeta struct { - layer layer.Layer - children map[ID]struct{} -} - -type store struct { - sync.Mutex - ls LayerGetReleaser - images map[ID]*imageMeta - fs StoreBackend - digestSet *digest.Set -} - -// NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { - is := &store{ - ls: ls, - images: make(map[ID]*imageMeta), - fs: fs, - digestSet: digest.NewSet(), - } - - // load all current images and retain layers - if err := is.restore(); err != nil { - return nil, err - } - - return is, nil -} - -func (is *store) restore() error { - err := is.fs.Walk(func(id ID) error { - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image %v, %v", id, err) - return nil - } - var l layer.Layer - if chainID := img.RootFS.ChainID(); chainID != "" { - l, err = is.ls.Get(chainID) - if err != nil { - return err - } - } - if err := is.digestSet.Add(digest.Digest(id)); err != nil { - return err - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[ID(id)] = imageMeta - - return nil - }) - if err != nil { - return err - } - - // Second pass to fill in children maps - for id := range is.images { - if parent, err := is.GetParent(id); err == nil { - if parentMeta := is.images[parent]; parentMeta != nil { - parentMeta.children[id] = struct{}{} - } - } - } - - return nil -} - -func (is *store) Create(config []byte) (ID, error) { - var img Image - err := json.Unmarshal(config, &img) - if err != nil { - return "", err - } - - // Must reject any config that references diffIDs from the history - // which aren't among the rootfs layers. 
- rootFSLayers := make(map[layer.DiffID]struct{}) - for _, diffID := range img.RootFS.DiffIDs { - rootFSLayers[diffID] = struct{}{} - } - - layerCounter := 0 - for _, h := range img.History { - if !h.EmptyLayer { - layerCounter++ - } - } - if layerCounter > len(img.RootFS.DiffIDs) { - return "", errors.New("too many non-empty layers in History section") - } - - dgst, err := is.fs.Set(config) - if err != nil { - return "", err - } - imageID := ID(dgst) - - is.Lock() - defer is.Unlock() - - if _, exists := is.images[imageID]; exists { - return imageID, nil - } - - layerID := img.RootFS.ChainID() - - var l layer.Layer - if layerID != "" { - l, err = is.ls.Get(layerID) - if err != nil { - return "", err - } - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[imageID] = imageMeta - if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { - delete(is.images, imageID) - return "", err - } - - return imageID, nil -} - -func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() - - dgst, err := is.digestSet.Lookup(term) - if err != nil { - return "", err - } - return ID(dgst), nil -} - -func (is *store) Get(id ID) (*Image, error) { - // todo: Check if image is in images - // todo: Detect manual insertions and start using them - config, err := is.fs.Get(id) - if err != nil { - return nil, err - } - - img, err := NewFromJSON(config) - if err != nil { - return nil, err - } - img.computedID = id - - img.Parent, err = is.GetParent(id) - if err != nil { - img.Parent = "" - } - - return img, nil -} - -func (is *store) Delete(id ID) ([]layer.Metadata, error) { - is.Lock() - defer is.Unlock() - - imageMeta := is.images[id] - if imageMeta == nil { - return nil, fmt.Errorf("unrecognized image ID %s", id.String()) - } - for id := range imageMeta.children { - is.fs.DeleteMetadata(id, "parent") - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - - if err := is.digestSet.Remove(digest.Digest(id)); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) - } - delete(is.images, id) - is.fs.Delete(id) - - if imageMeta.layer != nil { - return is.ls.Release(imageMeta.layer) - } - return nil, nil -} - -func (is *store) SetParent(id, parent ID) error { - is.Lock() - defer is.Unlock() - parentMeta := is.images[parent] - if parentMeta == nil { - return fmt.Errorf("unknown parent image ID %s", parent.String()) - } - parentMeta.children[id] = struct{}{} - return is.fs.SetMetadata(id, "parent", []byte(parent)) -} - -func (is *store) GetParent(id ID) (ID, error) { - d, err := is.fs.GetMetadata(id, "parent") - if err != nil { - return "", err - } - return ID(d), nil // todo: validate? 
-} - -func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() - - return is.children(id) -} - -func (is *store) children(id ID) []ID { - var ids []ID - if is.images[id] != nil { - for id := range is.images[id].children { - ids = append(ids, id) - } - } - return ids -} - -func (is *store) Heads() map[ID]*Image { - return is.imagesMap(false) -} - -func (is *store) Map() map[ID]*Image { - return is.imagesMap(true) -} - -func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() - - images := make(map[ID]*Image) - - for id := range is.images { - if !all && len(is.children(id)) > 0 { - continue - } - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) - continue - } - images[id] = img - } - return images -} diff --git a/vendor/github.com/hyperhq/hypercli/image/v1/imagev1.go b/vendor/github.com/hyperhq/hypercli/image/v1/imagev1.go deleted file mode 100644 index eabfec1ac..000000000 --- a/vendor/github.com/hyperhq/hypercli/image/v1/imagev1.go +++ /dev/null @@ -1,148 +0,0 @@ -package v1 - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/image" - "github.com/hyperhq/hypercli/layer" - "github.com/hyperhq/hypercli/pkg/version" -) - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = version.Version("1.8.3") - -// HistoryFromConfig creates a History struct from v1 configuration JSON -func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { - h := image.History{} - var v1Image image.V1Image - if err := json.Unmarshal(imageJSON, &v1Image); err != nil { - return h, err - } - - return image.History{ - Author: v1Image.Author, - Created: v1Image.Created, - CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), - Comment: v1Image.Comment, - EmptyLayer: emptyLayer, - }, nil -} - -// CreateID creates an ID from v1 image, layerID and parent ID. -// Used for backwards compatibility with old clients. -func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { - v1Image.ID = "" - v1JSON, err := json.Marshal(v1Image) - if err != nil { - return "", err - } - - var config map[string]*json.RawMessage - if err := json.Unmarshal(v1JSON, &config); err != nil { - return "", err - } - - // FIXME: note that this is slightly incompatible with RootFS logic - config["layer_id"] = rawJSON(layerID) - if parent != "" { - config["parent"] = rawJSON(parent) - } - - configJSON, err := json.Marshal(config) - if err != nil { - return "", err - } - logrus.Debugf("CreateV1ID %s", configJSON) - - return digest.FromBytes(configJSON), nil -} - -// MakeConfigFromV1Config creates an image config from the legacy V1 config format. 
-func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { - var dver struct { - DockerVersion string `json:"docker_version"` - } - - if err := json.Unmarshal(imageJSON, &dver); err != nil { - return nil, err - } - - useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion) - - if useFallback { - var v1Image image.V1Image - err := json.Unmarshal(imageJSON, &v1Image) - if err != nil { - return nil, err - } - imageJSON, err = json.Marshal(v1Image) - if err != nil { - return nil, err - } - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(imageJSON, &c); err != nil { - return nil, err - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsistent - delete(c, "parent_id") - delete(c, "layer_id") - delete(c, "throwaway") - - c["rootfs"] = rawJSON(rootfs) - c["history"] = rawJSON(history) - - return json.Marshal(c) -} - -// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct -func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. - var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - delete(configAsMap, "rootfs") - delete(configAsMap, "history") - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID '%s' is invalid", id) - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/empty.go b/vendor/github.com/hyperhq/hypercli/layer/empty.go deleted file mode 100644 index 5e1cb184b..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/empty.go +++ /dev/null @@ -1,48 +0,0 @@ -package layer - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" -) - -// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - -// (1024 NULL bytes) -const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") - -type emptyLayer struct{} - -// EmptyLayer is a layer that corresponds to empty tar.
-var EmptyLayer = &emptyLayer{} - -func (el *emptyLayer) TarStream() (io.ReadCloser, error) { - buf := new(bytes.Buffer) - tarWriter := tar.NewWriter(buf) - tarWriter.Close() - return ioutil.NopCloser(buf), nil -} - -func (el *emptyLayer) ChainID() ChainID { - return ChainID(DigestSHA256EmptyTar) -} - -func (el *emptyLayer) DiffID() DiffID { - return DigestSHA256EmptyTar -} - -func (el *emptyLayer) Parent() Layer { - return nil -} - -func (el *emptyLayer) Size() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/filestore.go b/vendor/github.com/hyperhq/hypercli/layer/filestore.go deleted file mode 100644 index 13b2ca9d1..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/filestore.go +++ /dev/null @@ -1,326 +0,0 @@ -package layer - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/pkg/ioutils" -) - -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) - -type fileMetadataStore struct { - root string -} - -type fileMetadataTransaction struct { - store *fileMetadataStore - root string -} - -// NewFSMetadataStore returns an instance of a metadata store -// which is backed by files on disk using the provided root -// as the root of metadata files. -func NewFSMetadataStore(root string) (MetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - return &fileMetadataStore{ - root: root, - }, nil -} - -func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { - dgst := digest.Digest(layer) - return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) -} - -func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { - return filepath.Join(fms.getLayerDirectory(layer), filename) -} - -func (fms *fileMetadataStore) getMountDirectory(mount string) string { - return filepath.Join(fms.root, "mounts", mount) -} - -func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { - return filepath.Join(fms.getMountDirectory(mount), filename) -} - -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { - tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return nil, err - } - - td, err := ioutil.TempDir(tmpDir, "layer-") - if err != nil { - return nil, err - } - // Create a new tempdir - return &fileMetadataTransaction{ - store: fms, - root: td, - }, nil -} - -func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) -} - -func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetCacheID(cacheID string) 
error { - return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) -} - -func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - var wc io.WriteCloser - if compressInput { - wc = gzip.NewWriter(f) - } else { - wc = f - } - - return ioutils.NewWriteCloserWrapper(wc, func() error { - wc.Close() - return f.Close() - }), nil -} - -func (fm *fileMetadataTransaction) Commit(layer ChainID) error { - finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { - return err - } - return os.Rename(fm.root, finalDir) -} - -func (fm *fileMetadataTransaction) Cancel() error { - return os.RemoveAll(fm.root) -} - -func (fm *fileMetadataTransaction) String() string { - return fm.root -} - -func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) - if err != nil { - return 0, err - } - - size, err := strconv.ParseInt(string(content), 10, 64) - if err != nil { - return 0, err - } - - return size, nil -} - -func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) - if err != nil { - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return DiffID(dgst), nil -} - -func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { - fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) - if err != nil { - return nil, err - } - f, err := gzip.NewReader(fz) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(f, func() error { - f.Close() - return fz.Close() - }), nil -} - -func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) -} - -func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) -} - -func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fms 
*fileMetadataStore) GetMountID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid mount id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid init id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { - var ids []ChainID - for _, algorithm := range supportedAlgorithms { - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, nil, err - } - - for _, fi := range fileInfos { - if fi.IsDir() && fi.Name() != "mounts" { - dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) - } else { - ids = append(ids, ChainID(dgst)) - } - } - } - } - - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) - if err != nil { - if os.IsNotExist(err) { - return ids, []string{}, nil - } - return nil, nil, err - } - - var mounts []string - for _, fi := range fileInfos { - if fi.IsDir() { - mounts = append(mounts, fi.Name()) - } - } - - return ids, mounts, nil -} - -func (fms *fileMetadataStore) Remove(layer ChainID) error { - return os.RemoveAll(fms.getLayerDirectory(layer)) -} - -func (fms *fileMetadataStore) RemoveMount(mount string) error { - return os.RemoveAll(fms.getMountDirectory(mount)) -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/layer.go b/vendor/github.com/hyperhq/hypercli/layer/layer.go deleted file mode 100644 index 273827649..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/layer.go +++ /dev/null @@ -1,256 +0,0 @@ -// Package layer is package for managing read only -// and read-write mounts on the union file system -// driver. Read-only mounts are referenced using a -// content hash and are protected from mutation in -// the exposed interface. The tar format is used -// to create read only layers and export both -// read only and writable layers. The exported -// tar data for a read only layer should match -// the tar used to create the layer. -package layer - -import ( - "errors" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/pkg/archive" -) - -var ( - // ErrLayerDoesNotExist is used when an operation is - // attempted on a layer which does not exist. - ErrLayerDoesNotExist = errors.New("layer does not exist") - - // ErrLayerNotRetained is used when a release is - // attempted on a layer which is not retained. 
- ErrLayerNotRetained = errors.New("layer not retained") - - // ErrMountDoesNotExist is used when an operation is - // attempted on a mount layer which does not exist. - ErrMountDoesNotExist = errors.New("mount does not exist") - - // ErrMountNameConflict is used when a mount is attempted - // to be created but there is already a mount with the name - // used for creation. - ErrMountNameConflict = errors.New("mount already exists with name") - - // ErrActiveMount is used when an operation on a - // mount is attempted but the layer is still - // mounted and the operation cannot be performed. - ErrActiveMount = errors.New("mount still active") - - // ErrNotMounted is used when requesting an active - // mount but the layer is not mounted. - ErrNotMounted = errors.New("not mounted") - - // ErrMaxDepthExceeded is used when a layer is attempted - // to be created which would result in a layer depth - // greater than the 125 max. - ErrMaxDepthExceeded = errors.New("max depth exceeded") -) - -// ChainID is the content-addressable ID of a layer. -type ChainID digest.Digest - -// String returns a string rendition of a layer ID -func (id ChainID) String() string { - return string(id) -} - -// DiffID is the hash of an individual layer tar. -type DiffID digest.Digest - -// String returns a string rendition of a layer DiffID -func (diffID DiffID) String() string { - return string(diffID) -} - -// TarStreamer represents an object which may -// have its contents exported as a tar stream. -type TarStreamer interface { - // TarStream returns a tar archive stream - // for the contents of a layer. - TarStream() (io.ReadCloser, error) -} - -// Layer represents a read only layer -type Layer interface { - TarStreamer - - // ChainID returns the content hash of the entire layer chain. The hash - // chain is made up of DiffID of top layer and all of its parents. - ChainID() ChainID - - // DiffID returns the content hash of the layer - // tar stream used to create this layer. - DiffID() DiffID - - // Parent returns the next layer in the layer chain. - Parent() Layer - - // Size returns the size of the entire layer chain. The size - // is calculated from the total size of all files in the layers. - Size() (int64, error) - - // DiffSize returns the size difference of the top layer - // from parent layer. - DiffSize() (int64, error) - - // Metadata returns the low level storage metadata associated - // with layer. - Metadata() (map[string]string, error) -} - -// RWLayer represents a layer which is -// read and writable -type RWLayer interface { - TarStreamer - - // Name of mounted layer - Name() string - - // Parent returns the layer which the writable - // layer was created from. - Parent() Layer - - // Mount mounts the RWLayer and returns the filesystem path - // the to the writable layer. - Mount(mountLabel string) (string, error) - - // Unmount unmounts the RWLayer. This should be called - // for every mount. If there are multiple mount calls - // this operation will only decrement the internal mount counter. - Unmount() error - - // Size represents the size of the writable layer - // as calculated by the total size of the files - // changed in the mutable layer. - Size() (int64, error) - - // Changes returns the set of changes for the mutable layer - // from the base layer. 
- Changes() ([]archive.Change, error) - - // Metadata returns the low level metadata for the mutable layer - Metadata() (map[string]string, error) -} - -// Metadata holds information about a -// read only layer -type Metadata struct { - // ChainID is the content hash of the layer - ChainID ChainID - - // DiffID is the hash of the tar data used to - // create the layer - DiffID DiffID - - // Size is the size of the layer and all parents - Size int64 - - // DiffSize is the size of the top layer - DiffSize int64 -} - -// MountInit is a function to initialize a -// writable mount. Changes made here will -// not be included in the Tar stream of the -// RWLayer. -type MountInit func(root string) error - -// Store represents a backend for managing both -// read-only and read-write layers. -type Store interface { - Register(io.Reader, ChainID) (Layer, error) - Get(ChainID) (Layer, error) - Release(Layer) ([]Metadata, error) - - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) - GetRWLayer(id string) (RWLayer, error) - ReleaseRWLayer(RWLayer) ([]Metadata, error) - - Cleanup() error - DriverStatus() [][2]string - DriverName() string -} - -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit. - StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - -// CreateChainID returns ID for a layerDigest slice -func CreateChainID(dgsts []DiffID) ChainID { - return createChainIDFromParent("", dgsts...) -} - -func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
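
The `H = "H(n-1) SHA256(n)"` comment in createChainIDFromParent is the entire chaining scheme: each step hashes the textual parent digest, a space, and the next DiffID. A minimal standalone sketch of the same fold, assuming plain crypto/sha256 in place of the docker/distribution digest package and hypothetical diff IDs:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID folds diff IDs into a chain ID:
// H(0) = diff(0); H(n) = sha256(H(n-1) + " " + diff(n)).
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + d))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	// Hypothetical diff IDs, for illustration only.
	fmt.Println(chainID([]string{"sha256:01ba4719", "sha256:26b25d45"}))
}
```

Because the fold is order-sensitive, the same set of diffs stacked in a different order yields a different ChainID, which is exactly the content-addressing property the store relies on.
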
-} - -// ReleaseAndLog releases the provided layer from the given layer -// store, logging any error and release metadata -func ReleaseAndLog(ls Store, l Layer) { - metadata, err := ls.Release(l) - if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) - } - LogReleaseMetadata(metadata) -} - -// LogReleaseMetadata logs a metadata array, use this to -// ensure consistent logging for release metadata -func LogReleaseMetadata(metadatas []Metadata) { - for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) - } -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/layer_store.go b/vendor/github.com/hyperhq/hypercli/layer/layer_store.go deleted file mode 100644 index b21d2978b..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/layer_store.go +++ /dev/null @@ -1,626 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/daemon/graphdriver" - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/idtools" - "github.com/hyperhq/hypercli/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// maxLayerDepth represents the maximum number of -// layers which can be chained together. 125 was -// chosen to account for the 127 max in some -// graphdrivers plus the 2 additional layers -// used to create a rwlayer. -const maxLayerDepth = 125 - -type layerStore struct { - store MetadataStore - driver graphdriver.Driver - - layerMap map[ChainID]*roLayer - layerL sync.Mutex - - mounts map[string]*mountedLayer - mountL sync.Mutex -} - -// StoreOptions are the options used to create a new Store instance -type StoreOptions struct { - StorePath string - MetadataStorePathTemplate string - GraphDriver string - GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// NewStoreFromOptions creates a new Store instance -func NewStoreFromOptions(options StoreOptions) (Store, error) { - driver, err := graphdriver.New( - options.StorePath, - options.GraphDriver, - options.GraphDriverOptions, - options.UIDMaps, - options.GIDMaps) - if err != nil { - return nil, fmt.Errorf("error initializing graphdriver: %v", err) - } - logrus.Debugf("Using graph driver %s", driver) - - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } - - return NewStoreFromGraphDriver(fms, driver) -} - -// NewStoreFromGraphDriver creates a new Store instance using the provided -// metadata store and graph driver. The metadata store will be used to restore -// the Store. 
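
NewStoreFromOptions above expects MetadataStorePathTemplate to carry a %s verb that is filled with the selected graphdriver's name before the metadata store is opened. A hedged wiring sketch, with made-up paths and the vendored import path used in this tree:

```go
package main

import (
	"log"

	"github.com/hyperhq/hypercli/layer"
)

func main() {
	// Hypothetical paths; %s in the template is replaced by the driver name.
	store, err := layer.NewStoreFromOptions(layer.StoreOptions{
		StorePath:                 "/var/lib/docker",
		MetadataStorePathTemplate: "/var/lib/docker/image/%s/layerdb",
		GraphDriver:               "overlay",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer store.Cleanup()
}
```
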
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { - ls := &layerStore{ - store: store, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, - } - - ids, mounts, err := store.List() - if err != nil { - return nil, err - } - - for _, id := range ids { - l, err := ls.loadLayer(id) - if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) - continue - } - if l.parent != nil { - l.parent.referenceCount++ - } - } - - for _, mount := range mounts { - if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) - } - } - - return ls, nil -} - -func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { - cl, ok := ls.layerMap[layer] - if ok { - return cl, nil - } - - diff, err := ls.store.GetDiffID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) - } - - size, err := ls.store.GetSize(layer) - if err != nil { - return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) - } - - cacheID, err := ls.store.GetCacheID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) - } - - parent, err := ls.store.GetParent(layer) - if err != nil { - return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) - } - - cl = &roLayer{ - chainID: layer, - diffID: diff, - size: size, - cacheID: cacheID, - layerStore: ls, - references: map[Layer]struct{}{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return nil, err - } - cl.parent = p - } - - ls.layerMap[cl.chainID] = cl - - return cl, nil -} - -func (ls *layerStore) loadMount(mount string) error { - if _, ok := ls.mounts[mount]; ok { - return nil - } - - mountID, err := ls.store.GetMountID(mount) - if err != nil { - return err - } - - initID, err := ls.store.GetInitID(mount) - if err != nil { - return err - } - - parent, err := ls.store.GetMountParent(mount) - if err != nil { - return err - } - - ml := &mountedLayer{ - name: mount, - mountID: mountID, - initID: initID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return err - } - ml.parent = p - - p.referenceCount++ - } - - ls.mounts[ml.name] = ml - - return nil -} - -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.New() - tr := io.TeeReader(ts, digester.Hash()) - - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := storage.NewJSONPacker(tsw) - defer tsw.Close() - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err - } - - applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr)) - if err != nil { - return err - } - - // Discard trailing data but ensure metadata is picked up to reconstruct stream - io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed - - layer.size = applySize - layer.diffID = DiffID(digester.Digest()) - - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) - - return nil -} - -func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an 
error returned - // to the caller (already exists). - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - if p.depth() >= maxLayerDepth { - err = ErrMaxDepthExceeded - return nil, err - } - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: stringid.GenerateRandomID(), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - } - - if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil { - return nil, err - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) - if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) - } - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - if err = ls.applyTar(tx, ts, pid, layer); err != nil { - return nil, err - } - - if layer.parent == nil { - layer.chainID = ChainID(layer.diffID) - } else { - layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return the error - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { - l, ok := ls.layerMap[layer] - if !ok { - return nil - } - - l.referenceCount++ - - return l -} - -func (ls *layerStore) get(l ChainID) *roLayer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - return ls.getWithoutLock(l) -} - -func (ls *layerStore) Get(l ChainID) (Layer, error) { - layer := ls.get(l) - if layer == nil { - return nil, ErrLayerDoesNotExist - } - - return layer.getReference(), nil -} - -func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { - err := ls.driver.Remove(layer.cacheID) - if err != nil { - return err - } - - err = ls.store.Remove(layer.chainID) - if err != nil { - return err - } - metadata.DiffID = layer.diffID - metadata.ChainID = layer.chainID - metadata.Size, err = layer.Size() - if err != nil { - return err - } - metadata.DiffSize = layer.size - - return nil -} - -func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { - depth := 0 - removed := []Metadata{} - for { - if l.referenceCount == 0 { - panic("layer not retained") - } - l.referenceCount-- - if l.referenceCount != 0 { - return removed, nil - } - - if len(removed) == 0 && depth > 0 { - panic("cannot remove layer with child") - } - if l.hasReferences() { - panic("cannot delete referenced layer") - } - var metadata Metadata - if err := ls.deleteLayer(l, &metadata); err != nil { - return nil, err - } - - delete(ls.layerMap, l.chainID) - removed = append(removed, metadata) - - if l.parent == nil { - return removed, nil - } - - depth++ - l = l.parent - } -} - -func (ls *layerStore) Release(l Layer) ([]Metadata, error) { - 
ls.layerL.Lock() - defer ls.layerL.Unlock() - layer, ok := ls.layerMap[l.ChainID()] - if !ok { - return []Metadata{}, nil - } - if !layer.hasReference(l) { - return nil, ErrLayerNotRetained - } - - layer.deleteReference(l) - - return ls.releaseLayer(layer) -} - -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - return nil, ErrMountNameConflict - } - - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - m = &mountedLayer{ - name: name, - parent: p, - mountID: ls.mountID(name), - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc) - if err != nil { - return nil, err - } - m.initID = pid - } - - if err = ls.driver.Create(m.mountID, pid, ""); err != nil { - return nil, err - } - - if err = ls.saveMount(m); err != nil { - return nil, err - } - - return m.getReference(), nil -} - -func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return nil, ErrMountDoesNotExist - } - - return mount.getReference(), nil -} - -func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[l.Name()] - if !ok { - return []Metadata{}, nil - } - - if err := m.deleteReference(l); err != nil { - return nil, err - } - - if m.hasReferences() { - return []Metadata{}, nil - } - - if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) - return nil, err - } - - if m.initID != "" { - if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) - return nil, err - } - } - - if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) - return nil, err - } - - delete(ls.mounts, m.Name()) - - ls.layerL.Lock() - defer ls.layerL.Unlock() - if m.parent != nil { - return ls.releaseLayer(m.parent) - } - - return []Metadata{}, nil -} - -func (ls *layerStore) saveMount(mount *mountedLayer) error { - if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { - return err - } - - if mount.initID != "" { - if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { - return err - } - } - - if mount.parent != nil { - if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { - return err - } - } - - ls.mounts[mount.name] = mount - - return nil -} - -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) { - // Use "-init" to maintain compatibility with graph drivers - // which are expecting this layer with this special name. If all - // graph drivers can be updated to not rely on knowing about this layer - // then the initID should be randomly generated. 
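
CreateRWLayer, Mount, Unmount, and ReleaseRWLayer above form the writable-layer lifecycle. A sketch of the intended call sequence against the Store interface (names hypothetical, error handling abbreviated); note every Mount must be paired with an Unmount, because the reference wrapper only counts activity:

```go
package example

import "github.com/hyperhq/hypercli/layer"

// useRWLayer sketches the RW-layer lifecycle.
func useRWLayer(store layer.Store, parent layer.ChainID) error {
	rw, err := store.CreateRWLayer("my-container", parent, "", nil)
	if err != nil {
		return err
	}
	path, err := rw.Mount("") // returns the writable layer's filesystem path
	if err != nil {
		return err
	}
	_ = path // ... populate the filesystem under path ...
	if err := rw.Unmount(); err != nil {
		return err
	}
	_, err = store.ReleaseRWLayer(rw)
	return err
}
```
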
- initID := fmt.Sprintf("%s-init", graphID) - - if err := ls.driver.Create(initID, parent, mountLabel); err != nil { - return "", err - } - p, err := ls.driver.Get(initID, "") - if err != nil { - return "", err - } - - if err := initFunc(p); err != nil { - ls.driver.Put(initID) - return "", err - } - - if err := ls.driver.Put(initID); err != nil { - return "", err - } - - return initID, nil -} - -func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { - type diffPathDriver interface { - DiffPath(string) (string, func() error, error) - } - - diffDriver, ok := ls.driver.(diffPathDriver) - if !ok { - diffDriver = &naiveDiffPathDriver{ls.driver} - } - - defer metadata.Close() - - // get our relative path to the container - fsPath, releasePath, err := diffDriver.DiffPath(graphID) - if err != nil { - return err - } - defer releasePath() - - metaUnpacker := storage.NewJSONUnpacker(metadata) - upackerCounter := &unpackSizeCounter{metaUnpacker, size} - fileGetter := storage.NewPathFileGetter(fsPath) - logrus.Debugf("Assembling tar data for %s from %s", graphID, fsPath) - return asm.WriteOutputTarStream(fileGetter, upackerCounter, w) -} - -func (ls *layerStore) Cleanup() error { - return ls.driver.Cleanup() -} - -func (ls *layerStore) DriverStatus() [][2]string { - return ls.driver.Status() -} - -func (ls *layerStore) DriverName() string { - return ls.driver.String() -} - -type naiveDiffPathDriver struct { - graphdriver.Driver -} - -func (n *naiveDiffPathDriver) DiffPath(id string) (string, func() error, error) { - p, err := n.Driver.Get(id, "") - if err != nil { - return "", nil, err - } - return p, func() error { - return n.Driver.Put(id) - }, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/layer_unix.go b/vendor/github.com/hyperhq/hypercli/layer/layer_unix.go deleted file mode 100644 index 5cc0e5714..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/layer_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin - -package layer - -import "github.com/hyperhq/hypercli/pkg/stringid" - -func (ls *layerStore) mountID(name string) string { - return stringid.GenerateRandomID() -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/layer_windows.go b/vendor/github.com/hyperhq/hypercli/layer/layer_windows.go deleted file mode 100644 index a6ada85e2..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/layer_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/hyperhq/hypercli/daemon/graphdriver" -) - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err 
:= ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/migration.go b/vendor/github.com/hyperhq/hypercli/layer/migration.go deleted file mode 100644 index 9779ab798..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/migration.go +++ /dev/null @@ -1,255 +0,0 @@ -package layer - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// CreateRWLayerByGraphID creates a RWLayer in the layer store using -// the provided name with the given graphID. To get the RWLayer -// after migration the layer may be retrieved by the given name. 
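
RegisterDiffID above leans on a single named err variable so one deferred closure can cancel the metadata transaction on any failure path; it even sets err when returning an existing layer successfully, purely to trigger cleanup. The shape of that pattern, reduced to a sketch against the deleted MetadataStore interface:

```go
package example

import (
	"log"

	"github.com/hyperhq/hypercli/layer"
)

func writeWithTx(ms layer.MetadataStore, id layer.ChainID, size int64) (err error) {
	tx, err := ms.StartTransaction()
	if err != nil {
		return err
	}
	defer func() {
		// Runs with the final value of err: any failure below cancels.
		if err != nil {
			if cerr := tx.Cancel(); cerr != nil {
				log.Printf("canceling transaction %q: %v", tx.String(), cerr)
			}
		}
	}()
	if err = tx.SetSize(size); err != nil {
		return err
	}
	return tx.Commit(id) // assigns the named err, so the defer sees it
}
```
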
-func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - if m.parent.chainID != parent { - return errors.New("name conflict, mismatched parent") - } - if m.mountID != graphID { - return errors.New("mount already exists") - } - - return nil - } - - if !ls.driver.Exists(graphID) { - return errors.New("graph ID does not exist") - } - - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // TODO: Ensure graphID has correct parent - - m = &mountedLayer{ - name: name, - parent: p, - mountID: graphID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - // Check for existing init layer - initID := fmt.Sprintf("%s-init", graphID) - if ls.driver.Exists(initID) { - m.initID = initID - } - - if err = ls.saveMount(m); err != nil { - return err - } - - return nil -} - -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.New() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { - rawarchive, err := ls.driver.Diff(id, parent) - if err != nil { - return - } - defer rawarchive.Close() - - f, err := os.Create(newTarDataPath) - if err != nil { - return - } - defer f.Close() - mfz := gzip.NewWriter(f) - metaPacker := storage.NewJSONPacker(mfz) - - packerCounter := &packSizeCounter{metaPacker, &size} - - archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) - if err != nil { - return - } - dgst, err := digest.FromReader(archive) - if err != nil { - return - } - diffID = DiffID(dgst) - return -} - -func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). 
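
ChecksumForGraphID above uses a defer over its named results to fall back to checksumForGraphIDNoTarsplit whenever the tar-split path fails, including the synthetic "no tar-split file" error it raises itself. The control-flow trick in isolation (hypothetical result strings):

```go
package main

import (
	"errors"
	"fmt"
)

// compute tries a fast path first; the deferred closure rewrites the
// named results with a fallback whenever the fast path failed.
func compute(fast bool) (v string, err error) {
	defer func() {
		if err != nil {
			v, err = "slow-path-result", nil // hypothetical fallback
		}
	}()
	if !fast {
		err = errors.New("no fast path")
		return
	}
	return "fast-path-result", nil
}

func main() {
	fmt.Println(compute(false)) // slow-path-result <nil>
	fmt.Println(compute(true))  // fast-path-result <nil>
}
```
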
- var err error - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: graphID, - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - diffID: diffID, - size: size, - chainID: createChainIDFromParent(parent, diffID), - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - tsw, err := tx.TarSplitWriter(false) - if err != nil { - return nil, err - } - defer tsw.Close() - tdf, err := os.Open(tarDataFile) - if err != nil { - return nil, err - } - defer tdf.Close() - _, err = io.Copy(tsw, tdf) - if err != nil { - return nil, err - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -type unpackSizeCounter struct { - unpacker storage.Unpacker - size *int64 -} - -func (u *unpackSizeCounter) Next() (*storage.Entry, error) { - e, err := u.unpacker.Next() - if err == nil && u.size != nil { - *u.size += e.Size - } - return e, err -} - -type packSizeCounter struct { - packer storage.Packer - size *int64 -} - -func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { - n, err := p.packer.AddEntry(e) - if err == nil && p.size != nil { - *p.size += e.Size - } - return n, err -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/mounted_layer.go b/vendor/github.com/hyperhq/hypercli/layer/mounted_layer.go deleted file mode 100644 index 8bec53ec6..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/mounted_layer.go +++ /dev/null @@ -1,144 +0,0 @@ -package layer - -import ( - "io" - "sync" - - "github.com/hyperhq/hypercli/pkg/archive" -) - -type mountedLayer struct { - name string - mountID string - initID string - parent *roLayer - layerStore *layerStore - - references map[RWLayer]*referencedRWLayer -} - -func (ml *mountedLayer) cacheParent() string { - if ml.initID != "" { - return ml.initID - } - if ml.parent != nil { - return ml.parent.cacheID - } - return "" -} - -func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { - archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) - if err != nil { - return nil, err - } - return archiver, nil -} - -func (ml *mountedLayer) Name() string { - return ml.name -} - -func (ml *mountedLayer) Parent() Layer { - if ml.parent != nil { - return ml.parent - } - - // Return a nil interface instead of an interface wrapping a nil - // pointer. 
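
The "nil interface instead of an interface wrapping a nil pointer" comment in Parent above guards against Go's typed-nil pitfall: an interface holding a nil *roLayer still carries a dynamic type, so it compares unequal to nil. A standalone demonstration:

```go
package main

import "fmt"

type T struct{}

func (*T) String() string { return "T" }

// typedNil returns an interface wrapping a nil *T: the interface
// itself is NOT nil, because it still carries the dynamic type *T.
func typedNil() fmt.Stringer {
	var p *T
	return p
}

// untypedNil returns a truly nil interface, as Parent above does.
func untypedNil() fmt.Stringer {
	return nil
}

func main() {
	fmt.Println(typedNil() == nil)   // false
	fmt.Println(untypedNil() == nil) // true
}
```
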
- return nil -} - -func (ml *mountedLayer) Mount(mountLabel string) (string, error) { - return ml.layerStore.driver.Get(ml.mountID, mountLabel) -} - -func (ml *mountedLayer) Unmount() error { - return ml.layerStore.driver.Put(ml.mountID) -} - -func (ml *mountedLayer) Size() (int64, error) { - return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Changes() ([]archive.Change, error) { - return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Metadata() (map[string]string, error) { - return ml.layerStore.driver.GetMetadata(ml.mountID) -} - -func (ml *mountedLayer) getReference() RWLayer { - ref := &referencedRWLayer{ - mountedLayer: ml, - } - ml.references[ref] = ref - - return ref -} - -func (ml *mountedLayer) hasReferences() bool { - return len(ml.references) > 0 -} - -func (ml *mountedLayer) deleteReference(ref RWLayer) error { - rl, ok := ml.references[ref] - if !ok { - return ErrLayerNotRetained - } - - if err := rl.release(); err != nil { - return err - } - delete(ml.references, ref) - - return nil -} - -type referencedRWLayer struct { - *mountedLayer - - activityL sync.Mutex - activityCount int -} - -func (rl *referencedRWLayer) release() error { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount > 0 { - return ErrActiveMount - } - - rl.activityCount = -1 - - return nil -} - -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount == -1 { - return "", ErrLayerNotRetained - } - - rl.activityCount++ - return rl.mountedLayer.Mount(mountLabel) -} - -func (rl *referencedRWLayer) Unmount() error { - rl.activityL.Lock() - defer rl.activityL.Unlock() - - if rl.activityCount == 0 { - return ErrNotMounted - } - if rl.activityCount == -1 { - return ErrLayerNotRetained - } - rl.activityCount-- - - return rl.mountedLayer.Unmount() -} diff --git a/vendor/github.com/hyperhq/hypercli/layer/ro_layer.go b/vendor/github.com/hyperhq/hypercli/layer/ro_layer.go deleted file mode 100644 index 51e0921dd..000000000 --- a/vendor/github.com/hyperhq/hypercli/layer/ro_layer.go +++ /dev/null @@ -1,119 +0,0 @@ -package layer - -import "io" - -type roLayer struct { - chainID ChainID - diffID DiffID - parent *roLayer - cacheID string - size int64 - layerStore *layerStore - - referenceCount int - references map[Layer]struct{} -} - -func (rl *roLayer) TarStream() (io.ReadCloser, error) { - r, err := rl.layerStore.store.TarSplitReader(rl.chainID) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - return pr, nil -} - -func (rl *roLayer) ChainID() ChainID { - return rl.chainID -} - -func (rl *roLayer) DiffID() DiffID { - return rl.diffID -} - -func (rl *roLayer) Parent() Layer { - if rl.parent == nil { - return nil - } - return rl.parent -} - -func (rl *roLayer) Size() (size int64, err error) { - if rl.parent != nil { - size, err = rl.parent.Size() - if err != nil { - return - } - } - - return size + rl.size, nil -} - -func (rl *roLayer) DiffSize() (size int64, err error) { - return rl.size, nil -} - -func (rl *roLayer) Metadata() (map[string]string, error) { - return rl.layerStore.driver.GetMetadata(rl.cacheID) -} - -type referencedCacheLayer struct { - *roLayer -} - -func (rl *roLayer) getReference() Layer { - ref := &referencedCacheLayer{ - roLayer: rl, - } - 
rl.references[ref] = struct{}{} - - return ref -} - -func (rl *roLayer) hasReference(ref Layer) bool { - _, ok := rl.references[ref] - return ok -} - -func (rl *roLayer) hasReferences() bool { - return len(rl.references) > 0 -} - -func (rl *roLayer) deleteReference(ref Layer) { - delete(rl.references, ref) -} - -func (rl *roLayer) depth() int { - if rl.parent == nil { - return 1 - } - return rl.parent.depth() + 1 -} - -func storeLayer(tx MetadataTransaction, layer *roLayer) error { - if err := tx.SetDiffID(layer.diffID); err != nil { - return err - } - if err := tx.SetSize(layer.size); err != nil { - return err - } - if err := tx.SetCacheID(layer.cacheID); err != nil { - return err - } - if layer.parent != nil { - if err := tx.SetParent(layer.parent.chainID); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/opts/hosts.go b/vendor/github.com/hyperhq/hypercli/opts/hosts.go deleted file mode 100644 index ad1675923..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/hosts.go +++ /dev/null @@ -1,148 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. 
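
parseDockerDaemonHost and the helpers following it normalize every accepted -H host form onto a canonical "proto://addr" string. The expected normalizations, inferred from the code below (illustrative, not exhaustive):

```
""             -> DefaultHost / DefaultTLSHost (chosen by ParseHost)
"0.0.0.0:2375" -> "tcp://0.0.0.0:2375"         (bare addr implies tcp)
"tcp://"       -> DefaultTCPHost ("tcp://<DefaultHTTPHost>:2375")
"unix://"      -> "unix:///var/run/docker.sock"
"npipe://"     -> "npipe:////./pipe/docker_engine"
"fd://3"       -> "fd://3"                      (returned verbatim)
```
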
-func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return parseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// parseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. 
- if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/opts/hosts_unix.go b/vendor/github.com/hyperhq/hypercli/opts/hosts_unix.go deleted file mode 100644 index 611407a9d..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/hyperhq/hypercli/opts/hosts_windows.go b/vendor/github.com/hyperhq/hypercli/opts/hosts_windows.go deleted file mode 100644 index ec52e9a70..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = DefaultTCPHost diff --git a/vendor/github.com/hyperhq/hypercli/opts/ip.go b/vendor/github.com/hyperhq/hypercli/opts/ip.go deleted file mode 100644 index c7b0dc994..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/ip.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/vendor/github.com/hyperhq/hypercli/opts/opts.go b/vendor/github.com/hyperhq/hypercli/opts/opts.go deleted file mode 100644 index 05aadbe74..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/opts.go +++ /dev/null @@ -1,242 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "regexp" - "strings" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. 
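
IPOpt above adapts a *net.IP so it can back a CLI flag: Set rejects anything net.ParseIP cannot handle, and the constructor seeds the referenced IP with a default. Since it implements both Set and String, it satisfies flag.Value directly; a hedged usage sketch with the standard flag package:

```go
package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/hyperhq/hypercli/opts"
)

func main() {
	var ip net.IP
	// "127.0.0.1" is the fallback written into ip before parsing.
	flag.Var(opts.NewIPOpt(&ip, "127.0.0.1"), "ip", "address to bind")
	flag.Parse()
	fmt.Println("binding to", ip)
}
```
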
-func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates if needed the input value and add it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -//MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. 
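
ListOpts above is the repeatable-flag counterpart: each occurrence of the flag runs through the optional validator before being appended to the shared slice. A sketch using the ValidateLabel helper from later in this file:

```go
package main

import (
	"flag"
	"fmt"

	"github.com/hyperhq/hypercli/opts"
)

func main() {
	labels := opts.NewListOpts(opts.ValidateLabel)
	flag.Var(&labels, "label", "set key=value metadata (repeatable)")
	flag.Parse()
	fmt.Println(labels.GetAll()) // every validated -label value, in order
}
```
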
-func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/opts/opts_unix.go b/vendor/github.com/hyperhq/hypercli/opts/opts_unix.go deleted file mode 100644 index f1ce844a8..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/hyperhq/hypercli/opts/opts_windows.go b/vendor/github.com/hyperhq/hypercli/opts/opts_windows.go deleted file mode 100644 index 2a9e2be74..000000000 --- a/vendor/github.com/hyperhq/hypercli/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. -// @jhowardmsft, @swernli. 
-// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP4. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/archive.go deleted file mode 100644 index cd72659f1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive.go +++ /dev/null @@ -1,1049 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hypercli/pkg/fileutils" - "github.com/hyperhq/hypercli/pkg/idtools" - "github.com/hyperhq/hypercli/pkg/ioutils" - "github.com/hyperhq/hypercli/pkg/pools" - "github.com/hyperhq/hypercli/pkg/promise" - "github.com/hyperhq/hypercli/pkg/system" -) - -type ( - // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. - Archive io.ReadCloser - // Reader is a type of io.Reader. - Reader io.Reader - // Compression is the state represents if compressed or not. - Compression int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions - IncludeSourceDir bool - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) - -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. 
-func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debugf("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. 
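
DecompressStream's structure is peek-then-wrap: buffer the stream, Peek at the first bytes without consuming them, dispatch on DetectCompression, and return a reader layered over the same buffered stream. A simplified sketch of the same pattern for the gzip case only (maybeGunzip is a hypothetical helper; the 0x1F 0x8B 0x08 signature is the one from the detection table above):

package main

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// maybeGunzip sniffs the stream and transparently decompresses it when
// the gzip magic bytes are present; otherwise the stream is returned
// unchanged. bufio.Reader.Peek does not consume the sniffed bytes,
// which is what makes the wrap safe.
func maybeGunzip(r io.Reader) (io.Reader, error) {
	br := bufio.NewReader(r)
	magic, err := br.Peek(3)
	if err != nil && err != io.EOF {
		return nil, err
	}
	if bytes.HasPrefix(magic, []byte{0x1F, 0x8B, 0x08}) {
		return gzip.NewReader(br)
	}
	return br, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello"))
	zw.Close()

	r, err := maybeGunzip(&buf)
	if err != nil {
		panic(err)
	}
	out, _ := io.ReadAll(r)
	fmt.Printf("%s\n", out) // hello
}
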
-func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) - if err != nil { - return err - } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. 
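
The hardlink and symlink cases in createTarFile above carry the path-traversal defence: the link target is joined onto the extraction root and the result must still live under that root, otherwise a breakoutError is returned. A self-contained sketch of that containment test (insideRoot is a hypothetical name; this variant appends a separator to the root, which is slightly stricter than the plain prefix check in the removed code):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// insideRoot reports whether joining name onto root stays within root.
// filepath.Join cleans the result, which is what neutralises "../"
// segments smuggled in through a tar entry's link name.
func insideRoot(root, name string) bool {
	target := filepath.Join(root, name)
	prefix := filepath.Clean(root) + string(filepath.Separator)
	return strings.HasPrefix(target, prefix)
}

func main() {
	fmt.Println(insideRoot("/tmp/extract", "a/b"))       // true
	fmt.Println(insideRoot("/tmp/extract", "../../etc")) // false
}
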
-func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - } - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Debugf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Debugf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. 
IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Debugf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - if !exceptions && f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", filePath, err) - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0777) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. 
- return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. 
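
Unpack above is, at its core, the standard archive/tar iteration — call tr.Next() until io.EOF — with exclusion, path containment, ownership remapping, and deferred directory mtimes layered onto each entry. Stripped to the skeleton, the loop looks like this (a sketch, not the removed implementation; example.tar is a hypothetical input file):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

// listArchive walks a tar stream entry by entry, the same iteration
// pattern Unpack builds on before it creates anything on disk.
func listArchive(r io.Reader) error {
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}

func main() {
	f, err := os.Open("example.tar") // hypothetical input file
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()
	if err := listArchive(f); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
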
-func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := system.MkdirAll(dst, 0755); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. 
- if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(chdone) - }() - - return pipeR, chdone, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. 
The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_unix.go deleted file mode 100644 index ec5c335df..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_unix.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/hyperhq/hypercli/pkg/system" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
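
TempArchive.Read above implements the read-once contract: bytes are counted as they are served, and the backing file is closed and removed the moment the pre-computed Size has been reached or an error ends the stream. The same self-deleting reader in isolation (selfDeletingFile is a hypothetical name):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// selfDeletingFile removes its backing file once it has been fully
// consumed, mirroring TempArchive.Read.
type selfDeletingFile struct {
	f    *os.File
	size int64 // pre-computed from Stat().Size()
	read int64
}

func (s *selfDeletingFile) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)
	s.read += int64(n)
	if err != nil || s.read == s.size {
		s.f.Close()
		os.Remove(s.f.Name())
	}
	return n, err
}

func main() {
	f, _ := os.CreateTemp("", "demo")
	io.Copy(f, strings.NewReader("payload"))
	f.Seek(0, io.SeekStart)

	out, _ := io.ReadAll(&selfDeletingFile{f: f, size: 7})
	fmt.Printf("%s\n", out) // payload; the temp file is gone afterwards
}
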
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - inode = uint64(s.Ino) - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") - } - return int(s.Uid), int(s.Gid), nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_windows.go deleted file mode 100644 index 2d36d4031..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/archive_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/hyperhq/hypercli/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. 
Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/changes.go deleted file mode 100644 index d48830ff9..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes.go +++ /dev/null @@ -1,416 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hypercli/pkg/idtools" - "github.com/hyperhq/hypercli/pkg/pools" - "github.com/hyperhq/hypercli/pkg/system" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. 
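
The major and minor helpers in archive_unix.go above decode a Linux dev_t: the major number occupies bits 8–19, and the minor number is split between bits 0–7 and 20–31. A round-trip sketch (mkdev is a hypothetical inverse composed from the two decoders; the bit layout follows the glibc encoding the removed code assumes):

package main

import "fmt"

// major and minor use the same bit layout as the removed
// archive_unix.go helpers.
func major(device uint64) uint64 { return (device >> 8) & 0xfff }
func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

// mkdev re-composes a dev_t from the two parts major/minor extract.
func mkdev(maj, min uint64) uint64 {
	return (min & 0xff) | (maj << 8) | ((min &^ 0xff) << 12)
}

func main() {
	dev := mkdev(8, 1)                  // /dev/sda1 is conventionally major 8, minor 1
	fmt.Println(major(dev), minor(dev)) // 8 1
}
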
-type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - // Skip AUFS metadata - if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched { - return err - } - - change := Change{ - Path: path, - } - - // Find out what kind of modification happened - file := filepath.Base(path) - // If there is a whiteout, then the file was removed - if strings.HasPrefix(file, WhiteoutPrefix) { - originalFile := file[len(WhiteoutPrefix):] - change.Path = filepath.Join(filepath.Dir(path), originalFile) - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/hyperhq/hypercli/pull/13590 for details. 
- if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. 
- if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { - reader, writer := io.Pipe() - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. 
a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_linux.go deleted file mode 100644 index 6e661755f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_linux.go +++ /dev/null @@ -1,285 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/hyperhq/hypercli/pkg/system" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. 
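
ExportChanges above encodes a deletion as a whiteout entry: removing /a/b produces a zero-size tar entry named a/.wh.b with fresh timestamps, which the layer consumer interprets as "hide this path". The name transformation in isolation (whiteoutName is a hypothetical helper; ".wh." is the AUFS convention the removed WhiteoutPrefix constant is assumed to hold):

package main

import (
	"fmt"
	"path/filepath"
)

// whiteoutPrefix marks deleted paths in a layer tar (AUFS convention,
// assumed to match the removed package's WhiteoutPrefix).
const whiteoutPrefix = ".wh."

// whiteoutName converts a deleted path into its whiteout marker,
// the same dir/base split ExportChanges performs.
func whiteoutName(deleted string) string {
	dir, base := filepath.Split(deleted)
	return filepath.Join(dir, whiteoutPrefix+base)
}

func main() {
	fmt.Println(whiteoutName("/etc/passwd")) // /etc/.wh.passwd
}
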
-func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_other.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_other.go deleted file mode 100644 index 2f57acecd..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/hyperhq/hypercli/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_unix.go deleted file mode 100644 index d07bc66bc..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "syscall" - - "github.com/hyperhq/hypercli/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return uint64(fi.Sys().(*syscall.Stat_t).Ino) -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_windows.go deleted file mode 100644 index f7c972a70..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/hyperhq/hypercli/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/copy.go deleted file mode 100644 index f7925dd85..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy.go +++ /dev/null @@ -1,458 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hypercli/pkg/system" -) - -// Errors used or returned by this file. 
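
collectFileInfoForChanges in changes_other.go above walks the old and new trees in two goroutines and joins them through a channel buffered to the number of workers, so neither sender can block. The same fan-out/fan-in pattern reduced to its essentials (runBoth is a hypothetical helper):

package main

import (
	"errors"
	"fmt"
)

// runBoth executes two jobs concurrently and reports the first error,
// using the buffered-channel join from collectFileInfoForChanges.
func runBoth(job1, job2 func() error) error {
	errs := make(chan error, 2) // buffered: sends never block
	go func() { errs <- job1() }()
	go func() { errs <- job2() }()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("walk failed") },
	)
	fmt.Println(err) // walk failed
}
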
-var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content Archive, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". 
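As a rough illustration of the trailing-`/.` semantics documented above, here is a minimal, self-contained sketch (standard library only; it reimplements the two small helpers rather than importing the deleted package, and the names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Simplified re-implementations of the helpers above, for illustration only.
func hasTrailingSep(p string) bool  { return len(p) > 0 && os.IsPathSeparator(p[len(p)-1]) }
func specifiesCurDir(p string) bool { return filepath.Base(p) == "." }

func preserve(cleaned, original string) string {
	if !specifiesCurDir(cleaned) && specifiesCurDir(original) {
		if !hasTrailingSep(cleaned) {
			cleaned += string(filepath.Separator)
		}
		cleaned += "."
	}
	if !hasTrailingSep(cleaned) && hasTrailingSep(original) {
		cleaned += string(filepath.Separator)
	}
	return cleaned
}

func main() {
	for _, p := range []string{"/data/src/.", "/data/src/", "/data/src"} {
		// filepath.Clean strips the suffix; preserve puts it back.
		fmt.Printf("%-14q -> %q\n", p, preserve(filepath.Clean(p), p))
	}
}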
-func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
- sourcePath = normalizePath(sourcePath)
- if _, err = os.Lstat(sourcePath); err != nil {
- // Catches the case where the source does not exist or is not a
- // directory if asserted to be a directory, as this also causes an
- // error.
- return
- }
-
- // Separate the source path between its directory and
- // the entry in that directory which we are archiving.
- sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
-
- filter := []string{sourceBase}
-
- logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
-
- return TarWithOptions(sourceDir, &TarOptions{
- Compression: Uncompressed,
- IncludeFiles: filter,
- IncludeSourceDir: true,
- RebaseNames: map[string]string{
- sourceBase: rebaseName,
- },
- })
-}
-
-// CopyInfo holds basic info about the source
-// or destination path of a copy operation.
-type CopyInfo struct {
- Path string
- Exists bool
- IsDir bool
- RebaseName string
-}
-
-// CopyInfoSourcePath stats the given path to create a CopyInfo
-// struct representing that resource for the source of an archive copy
-// operation. The given path should be an absolute local path. A source path
-// has all symlinks evaluated that appear before the last path separator ("/"
-// on Unix). As it is to be a copy source, the path must exist.
-func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
- // normalize the file path and then evaluate the symlink;
- // we will use the target file instead of the symlink if
- // followLink is set
- path = normalizePath(path)
-
- resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
- if err != nil {
- return CopyInfo{}, err
- }
-
- stat, err := os.Lstat(resolvedPath)
- if err != nil {
- return CopyInfo{}, err
- }
-
- return CopyInfo{
- Path: resolvedPath,
- Exists: true,
- IsDir: stat.IsDir(),
- RebaseName: rebaseName,
- }, nil
-}
-
-// CopyInfoDestinationPath stats the given path to create a CopyInfo
-// struct representing that resource for the destination of an archive copy
-// operation. The given path should be an absolute local path.
-func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
- maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
- path = normalizePath(path)
- originalPath := path
-
- stat, err := os.Lstat(path)
-
- if err == nil && stat.Mode()&os.ModeSymlink == 0 {
- // The path exists and is not a symlink.
- return CopyInfo{
- Path: path,
- Exists: true,
- IsDir: stat.IsDir(),
- }, nil
- }
-
- // While the path is a symlink.
- for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
- if n > maxSymlinkIter {
- // Don't follow symlinks more than this arbitrary number of times.
- return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
- }
-
- // The path is a symbolic link. We need to evaluate it so that the
- // destination of the copy operation is the link target and not the
- // link itself. This is notably different from CopyInfoSourcePath which
- // only evaluates symlinks before the last appearing path separator.
- // Also note that it is okay if the last path element is a broken
- // symlink as the copy operation should create the target.
- var linkTarget string
-
- linkTarget, err = os.Readlink(path)
- if err != nil {
- return CopyInfo{}, err
- }
-
- if !system.IsAbs(linkTarget) {
- // Join with the parent directory.
- dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. 
In this case, the destination file will need
- to be created when the archive is extracted and the source content
- entry will have to be renamed to have a basename which matches the
- destination path's basename.
- if len(srcInfo.RebaseName) != 0 {
- srcBase = srcInfo.RebaseName
- }
- return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
- }
-
-}
-
-// RebaseArchiveEntries rewrites the given srcContent archive replacing
-// an occurrence of oldBase with newBase at the beginning of entry names.
-func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
- if oldBase == string(os.PathSeparator) {
- // If oldBase specifies the root directory, use an empty string as
- // oldBase instead so that newBase doesn't replace the path separator
- // that all paths will start with.
- oldBase = ""
- }
-
- rebased, w := io.Pipe()
-
- go func() {
- srcTar := tar.NewReader(srcContent)
- rebasedTar := tar.NewWriter(w)
-
- for {
- hdr, err := srcTar.Next()
- if err == io.EOF {
- // Signals end of archive.
- rebasedTar.Close()
- w.Close()
- return
- }
- if err != nil {
- w.CloseWithError(err)
- return
- }
-
- hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
-
- if err = rebasedTar.WriteHeader(hdr); err != nil {
- w.CloseWithError(err)
- return
- }
-
- if _, err = io.Copy(rebasedTar, srcTar); err != nil {
- w.CloseWithError(err)
- return
- }
- }
- }()
-
- return rebased
-}
-
-// CopyResource performs an archive copy from the given source path to the
-// given destination path. The source path MUST exist and the destination
-// path's parent directory must exist.
-func CopyResource(srcPath, dstPath string, followLink bool) error {
- var (
- srcInfo CopyInfo
- err error
- )
-
- // Ensure in platform semantics
- srcPath = normalizePath(srcPath)
- dstPath = normalizePath(dstPath)
-
- // Clean the source and destination paths.
- srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
- dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
-
- if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
- return err
- }
-
- content, err := TarResource(srcInfo)
- if err != nil {
- return err
- }
- defer content.Close()
-
- return CopyTo(content, srcInfo, dstPath)
-}
-
-// CopyTo handles extracting the given content whose
-// entries should be sourced from srcInfo to dstPath.
-func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
- // The destination path need not exist, but CopyInfoDestinationPath will
- // ensure that at least the parent directory exists.
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
- if err != nil {
- return err
- }
-
- dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
- if err != nil {
- return err
- }
- defer copyArchive.Close()
-
- options := &TarOptions{
- NoLchown: true,
- NoOverwriteDirNonDir: true,
- }
-
- return Untar(copyArchive, dstDir, options)
-}
-
-// ResolveHostSourcePath decides which real path needs to be copied, based on
-// whether symlinks should be followed. If followLink is true, resolvedPath is
-// the target of any symlink in path; otherwise only symlinks in the parent
-// directory are resolved, and a trailing symlink is returned as-is, unresolved.
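The rebasing technique above can be exercised end-to-end with nothing but the standard library. The sketch below (hypothetical names, in-memory archive) renames the first path element of each entry the same way RebaseArchiveEntries does, streaming through an io.Pipe:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"
)

func main() {
	// Build a small archive whose entries live under "old/".
	var src bytes.Buffer
	tw := tar.NewWriter(&src)
	for _, name := range []string{"old/a.txt", "old/sub/b.txt"} {
		body := []byte("hello")
		if err := tw.WriteHeader(&tar.Header{Name: name, Mode: 0644, Size: int64(len(body))}); err != nil {
			log.Fatal(err)
		}
		if _, err := tw.Write(body); err != nil {
			log.Fatal(err)
		}
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	// Rewrite "old" -> "new" on the first occurrence in each entry name,
	// streaming through an io.Pipe just like the function above.
	rebased, w := io.Pipe()
	go func() {
		tr := tar.NewReader(&src)
		out := tar.NewWriter(w)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				out.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}
			hdr.Name = strings.Replace(hdr.Name, "old", "new", 1)
			if err := out.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}
			if _, err := io.Copy(out, tr); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	tr := tar.NewReader(rebased)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name) // new/a.txt, new/sub/b.txt
	}
}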
-func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4a..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/diff.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/diff.go deleted file mode 100644 index 089cecaf0..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/diff.go +++ /dev/null @@ -1,279 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hypercli/pkg/idtools" - "github.com/hyperhq/hypercli/pkg/pools" - "github.com/hyperhq/hypercli/pkg/system" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. 
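Before the implementation, a small standalone sketch of the size accounting that UnpackLayer's return value is built on: iterate the tar stream and sum each header's declared Size (standard library only; the helper name is illustrative):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// layerSize walks a (possibly already decompressed) tar stream and adds up
// the declared size of every entry, mirroring the bookkeeping above.
func layerSize(layer io.Reader) (int64, error) {
	var size int64
	tr := tar.NewReader(layer)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return size, nil
		}
		if err != nil {
			return 0, err
		}
		size += hdr.Size
	}
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	body := []byte("0123456789")
	tw.WriteHeader(&tar.Header{Name: "f", Mode: 0644, Size: int64(len(body))})
	tw.Write(body)
	tw.Close()

	n, err := layerSize(&buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n) // 10
}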
-func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - if options == nil { - options = &TarOptions{} - } - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. 
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID - } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. 
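The `..`-prefix test near the top of the loop above is the core of the path-breakout guard. A standalone sketch of the same idea (insideDest is a hypothetical helper, with an extra rel != ".." check for safety):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// insideDest reports whether joining name under dest stays inside dest,
// using the same Rel-plus-".." test as the loop above.
func insideDest(dest, name string) (bool, error) {
	path := filepath.Join(dest, filepath.Clean(name))
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return false, err
	}
	return !strings.HasPrefix(rel, ".."+string(os.PathSeparator)) && rel != "..", nil
}

func main() {
	for _, name := range []string{"etc/passwd", "../../etc/passwd"} {
		ok, err := insideDest("/unpack", name)
		fmt.Println(name, ok, err) // true for the first, false for the second
	}
}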
-func ApplyLayer(dest string, layer Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/example_changes.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/example_changes.go deleted file mode 100644 index d407f4670..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hypercli/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/time_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/time_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac054..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/whiteouts.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/whiteouts.go deleted file mode 100644 index d20478a10..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." 
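As a quick illustration of how this prefix is used: deleting /dir/file in an upper layer is recorded by emitting a marker entry named as below (standalone sketch; whiteoutFor is a hypothetical helper, not part of this package):

package main

import (
	"fmt"
	"path/filepath"
)

// whiteoutFor derives the marker name that records the deletion of a path.
func whiteoutFor(deleted string) string {
	dir, base := filepath.Split(deleted)
	return filepath.Join(dir, ".wh."+base)
}

func main() {
	fmt.Println(whiteoutFor("/dir/file")) // /dir/.wh.file
}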
- -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/hyperhq/hypercli/pkg/archive/wrap.go b/vendor/github.com/hyperhq/hypercli/pkg/archive/wrap.go deleted file mode 100644 index dfb335c0b..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive.go deleted file mode 100644 index 419533d69..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,97 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/idtools" -) - -var chrootArchiver = &archive.Archiver{Untar: Untar} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
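The pair-based Generate API above is easy to model with the standard library alone. A minimal sketch (pairsToTar is a hypothetical stand-in) that builds the archive and lists what landed in it:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// pairsToTar mirrors Generate above: alternating name/content strings,
// with a trailing name (no content) producing an empty file.
func pairsToTar(input ...string) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for i := 0; i < len(input); i += 2 {
		name, content := input[i], ""
		if i+1 < len(input) {
			content = input[i+1]
		}
		hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(content))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := io.WriteString(tw, content); err != nil {
			return nil, err
		}
	}
	return buf, tw.Close()
}

func main() {
	buf, err := pairsToTar("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(buf)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}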
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index 9ce931681..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/reexec" -) - -func chroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return err - } - return syscall.Chdir("/") -} - -// untar is the entry-point for docker-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. 
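A minimal standalone sketch of the chroot-then-chdir idiom used below (Unix only, requires root/CAP_SYS_CHROOT; /var/empty is just a sample directory, and confine is a hypothetical name):

//go:build !windows

package main

import (
	"log"
	"syscall"
)

// confine jails the process in dir and resets the working directory so no
// handle to the old root survives; without the Chdir the jail is escapable.
func confine(dir string) error {
	if err := syscall.Chroot(dir); err != nil {
		return err
	}
	return syscall.Chdir("/")
}

func main() {
	if err := confine("/var/empty"); err != nil {
		log.Fatal(err)
	}
	log.Println("now chrooted")
}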
-func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - flush(os.Stdin) - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("docker-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output) - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 89dc89a67..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff.go deleted file mode 100644 index 6a57407cf..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,19 +0,0 @@ -package chrootarchive - -import "github.com/hyperhq/hypercli/pkg/archive" - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. 
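The fd-3 options pipe used by untar and invokeUnpack above can be demonstrated in one self-contained program that re-execs itself (names and the options struct here are illustrative; Unix semantics, where the first ExtraFiles entry becomes fd 3 in the child):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
	"os/exec"
)

type options struct{ Dest string }

func main() {
	if len(os.Args) > 1 && os.Args[1] == "child" {
		// Child side: decode the options handed to us on fd 3.
		var opts options
		if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&opts); err != nil {
			log.Fatal(err)
		}
		fmt.Println("child got dest:", opts.Dest)
		return
	}

	// Parent side: marshal the options onto a pipe passed as ExtraFiles.
	r, w, err := os.Pipe()
	if err != nil {
		log.Fatal(err)
	}
	cmd := exec.Command(os.Args[0], "child")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	if err := json.NewEncoder(w).Encode(options{Dest: "/unpack"}); err != nil {
		log.Fatal(err)
	}
	w.Close()
	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}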
-func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index 6b5b47306..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,118 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/reexec" - "github.com/hyperhq/hypercli/pkg/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir = "" - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - flush(os.Stdout) - flush(os.Stdin) - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. 
- response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index e451824f7..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/hyperhq/hypercli/pkg/archive" - "github.com/hyperhq/hypercli/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) - } - - return s, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 24c9d73c5..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/hyperhq/hypercli/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) { - io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf8..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils.go b/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils.go deleted file mode 100644 index b5057ecd1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,279 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/Sirupsen/logrus" -) - -// exclusion return true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' 
-}
-
-// empty returns true if the specified pattern is empty
-func empty(pattern string) bool {
- return pattern == ""
-}
-
-// CleanPatterns takes a slice of patterns and returns a new
-// slice of patterns cleaned with filepath.Clean, stripped
-// of any empty patterns and lets the caller know whether the
-// slice contains any exception patterns (prefixed with !).
-func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
- // Loop over exclusion patterns and:
- // 1. Clean them up.
- // 2. Indicate whether we are dealing with any exception rules.
- // 3. Error if we see a single exclusion marker on its own (!).
- cleanedPatterns := []string{}
- patternDirs := [][]string{}
- exceptions := false
- for _, pattern := range patterns {
- // Eliminate leading and trailing whitespace.
- pattern = strings.TrimSpace(pattern)
- if empty(pattern) {
- continue
- }
- if exclusion(pattern) {
- if len(pattern) == 1 {
- return nil, nil, false, errors.New("Illegal exclusion pattern: !")
- }
- exceptions = true
- }
- pattern = filepath.Clean(pattern)
- cleanedPatterns = append(cleanedPatterns, pattern)
- if exclusion(pattern) {
- pattern = pattern[1:]
- }
- patternDirs = append(patternDirs, strings.Split(pattern, "/"))
- }
-
- return cleanedPatterns, patternDirs, exceptions, nil
-}
-
-// Matches returns true if file matches any of the patterns
-// and isn't excluded by any of the subsequent patterns.
-func Matches(file string, patterns []string) (bool, error) {
- file = filepath.Clean(file)
-
- if file == "." {
- // Don't let them exclude everything, kind of silly.
- return false, nil
- }
-
- patterns, patDirs, _, err := CleanPatterns(patterns)
- if err != nil {
- return false, err
- }
-
- return OptimizedMatches(file, patterns, patDirs)
-}
-
-// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
-// It will assume that the inputs have been preprocessed and therefore the function
-// doesn't need to do as much error checking and clean-up. This was done to avoid
-// repeating these steps on each file being checked during the archive process.
-// The more generic fileutils.Matches() can't make these assumptions.
-func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
- matched := false
- parentPath := filepath.Dir(file)
- parentPathDirs := strings.Split(parentPath, "/")
-
- for i, pattern := range patterns {
- negative := false
-
- if exclusion(pattern) {
- negative = true
- pattern = pattern[1:]
- }
-
- match, err := regexpMatch(pattern, file)
- if err != nil {
- return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
- }
-
- if !match && parentPath != "." {
- // Check to see if the pattern matches one of our parent dirs.
- if len(patDirs[i]) <= len(parentPathDirs) {
- match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
- strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
- }
- }
-
- if match {
- matched = !negative
- }
- }
-
- if matched {
- logrus.Debugf("Skipping excluded path: %s", file)
- }
-
- return matched, nil
-}
-
-// regexpMatch tries to match the logic of filepath.Match but
-// does so using regexp logic. We do this so that we can expand the
-// wildcard set to include other things, like "**" to mean any number
-// of directories. This means that we should be backwards compatible
-// with filepath.Match(). We'll end up supporting more stuff, due to
-// the fact that we're using regexp, but that's ok - it does no harm.
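The last-match-wins exception semantics can be modeled compactly. A simplified sketch using filepath.Match (the real regexpMatch below additionally handles "**" and parent-directory matches):

package main

import (
	"fmt"
	"path/filepath"
)

// matches is a simplified model of Matches above: patterns are applied in
// order, a leading "!" negates, and the last pattern that matches wins.
func matches(file string, patterns []string) bool {
	matched := false
	for _, p := range patterns {
		negative := false
		if len(p) > 0 && p[0] == '!' {
			negative = true
			p = p[1:]
		}
		if ok, _ := filepath.Match(p, file); ok {
			matched = !negative
		}
	}
	return matched
}

func main() {
	patterns := []string{"*.log", "!keep.log"}
	fmt.Println(matches("debug.log", patterns)) // true  (excluded)
	fmt.Println(matches("keep.log", patterns))  // false (exception re-includes)
}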
-func regexpMatch(pattern, path string) (bool, error) { - regStr := "^" - - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(pattern, path); err != nil { - return false, err - } - - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" - } - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if strings.Index(".$", string(ch)) != -1 { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - res, err := regexp.MatchString(regStr, path) - - // Map regexp's error to filepath's so no one knows we're not using filepath - if err != nil { - err = filepath.ErrBadPattern - } - - return res, err -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and remove -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. 
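For the "**" behavior described above, here is a reduced, standalone translation sketch. It covers only a subset of what regexpMatch supports (no escaping, no Windows separators) and the helper name is illustrative:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// toRegexp converts a simplified pattern ("*" = any run of non-separator
// characters, "**" = anything including separators, "?" = one non-separator
// character) into an anchored regular expression; "." and "$" are escaped.
func toRegexp(pattern string) string {
	var b strings.Builder
	b.WriteString("^")
	for i := 0; i < len(pattern); i++ {
		switch c := pattern[i]; c {
		case '*':
			if i+1 < len(pattern) && pattern[i+1] == '*' {
				b.WriteString(".*")
				i++
			} else {
				b.WriteString("[^/]*")
			}
		case '?':
			b.WriteString("[^/]")
		case '.', '$':
			b.WriteString(`\` + string(c))
		default:
			b.WriteByte(c)
		}
	}
	b.WriteString("$")
	return b.String()
}

func main() {
	re := regexp.MustCompile(toRegexp("docs/**/*.md"))
	fmt.Println(re.MatchString("docs/a/b/readme.md")) // true
	fmt.Println(re.MatchString("docs/readme.txt"))    // false
}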
-func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. -func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index d5c3abf56..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/homedir/homedir.go b/vendor/github.com/hyperhq/hypercli/pkg/homedir/homedir.go deleted file mode 100644 index 8154e83f0..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
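The env-var-with-fallback lookup in Get above now has a close stdlib analogue. A minimal model (assuming os.UserHomeDir, Go 1.12+, as the fallback in place of the user-db lookup):

package main

import (
	"fmt"
	"os"
	"runtime"
)

// home prefers the platform's home env var, then falls back to the stdlib.
func home() string {
	key := "HOME"
	if runtime.GOOS == "windows" {
		key = "USERPROFILE"
	}
	if h := os.Getenv(key); h != "" {
		return h
	}
	h, _ := os.UserHomeDir() // analogous to the user-db lookup above
	return h
}

func main() { fmt.Println(home()) }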
-func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/httputils/httputils.go b/vendor/github.com/hyperhq/hypercli/pkg/httputils/httputils.go deleted file mode 100644 index f8749eee1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/hyperhq/hypercli/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/httputils/mimetype.go b/vendor/github.com/hyperhq/hypercli/pkg/httputils/mimetype.go deleted file mode 100644 index d5cf34e4f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/httputils/mimetype.go +++ /dev/null @@ -1,30 +0,0 @@ -package httputils - -import ( - "mime" - "net/http" -) - -// MimeTypes stores the MIME content type. -var MimeTypes = struct { - TextPlain string - Tar string - OctetStream string -}{"text/plain", "application/tar", "application/octet-stream"} - -// DetectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. 
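Exercising the same header regexp as ParseServerHeader above on a sample value (standalone, standard library only):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var headerRe = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`)

func main() {
	m := headerRe.FindStringSubmatch("docker/1.8.0-dev (windows)")
	if len(m) != 4 {
		fmt.Println("bad header")
		return
	}
	fmt.Println(strings.TrimSpace(m[1])) // docker
	fmt.Println(strings.TrimSpace(m[2])) // 1.8.0-dev
	fmt.Println(strings.TrimSpace(m[3])) // windows
}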
-func DetectContentType(c []byte) (string, map[string]string, error) { - - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - - return contentType, args, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/httputils/resumablerequestreader.go b/vendor/github.com/hyperhq/hypercli/pkg/httputils/resumablerequestreader.go deleted file mode 100644 index bebc8608c..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/httputils/resumablerequestreader.go +++ /dev/null @@ -1,95 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/Sirupsen/logrus" -) - -type resumableRequestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 -} - -// ResumableRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times we retry to make requests again (not resumes) -// totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} -} - -// ResumableRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. -func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} -} - -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(5 * time.Second) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(5 * time.Duration(r.failures) * time.Second) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *resumableRequestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *resumableRequestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - 
r.currentResponse = nil - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools.go b/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools.go deleted file mode 100644 index a1301ee97..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools.go +++ /dev/null @@ -1,195 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAllNewAs creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID - } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID - } - return uid, gid, nil -} - -// ToContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// ToHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. 
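// An illustrative sketch of the range arithmetic ToContainer/ToHost perform,
// using a hypothetical one-entry mapping rather than the removed IDMap API.
package main

import "fmt"

type mapping struct{ containerID, hostID, size int }

// toHost translates a container ID to its host ID under a single range.
func toHost(contID int, m mapping) (int, bool) {
	if contID >= m.containerID && contID <= m.containerID+m.size-1 {
		return m.hostID + (contID - m.containerID), true
	}
	return -1, false
}

func main() {
	m := mapping{containerID: 0, hostID: 100000, size: 65536}
	fmt.Println(toHost(0, m))     // 100000 true: container root -> host uid 100000
	fmt.Println(toHost(70000, m)) // -1 false: outside the mapped range
}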
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// CreateIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, nil, err - } - if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username { - // return the first entry for a user; ignores potential for multiple ranges per user - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_unix.go deleted file mode 100644 index 5a977c5b1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package idtools - -import ( - "os" - "path/filepath" - - "github.com/hyperhq/hypercli/pkg/system" -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path 
components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } - // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_windows.go deleted file mode 100644 index 0db907a8b..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/hyperhq/hypercli/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
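// A trimmed, standalone sketch of the unix mkdirAs strategy above: record
// which path components are missing, create them, then chown only those.
// Error handling is reduced for brevity; this is not the removed function.
package main

import (
	"os"
	"path/filepath"
)

func mkdirAllNewAs(path string, mode os.FileMode, uid, gid int) error {
	var created []string
	for p := path; p != "/" && p != filepath.Dir(p); p = filepath.Dir(p) {
		if _, err := os.Stat(p); os.IsNotExist(err) {
			created = append(created, p)
		}
	}
	if err := os.MkdirAll(path, mode); err != nil {
		return err
	}
	for _, p := range created { // chown only directories we just created
		if err := os.Chown(p, uid, gid); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = mkdirAllNewAs(filepath.Join(os.TempDir(), "a", "b"), 0755, os.Getuid(), os.Getgid())
}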
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index c1eedff10..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,155 +0,0 @@ -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" - "syscall" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --uid --shell /bin/login --no-create-home --disabled-login --ingroup -// useradd -M -u -s /bin/nologin -N -g -// addgroup --gid -// groupadd -g - -const baseUID int = 10000 -const baseGID int = 10000 -const idMAX int = 65534 - -var ( - userCommand string - groupCommand string - - cmdTemplates = map[string]string{ - "adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s", - "useradd": "-M -u %d -s /bin/false -N -g %s %s", - "addgroup": "--gid %d %s", - "groupadd": "-g %d %s", - } -) - -func init() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - if _, err := resolveBinary("addgroup"); err == nil { - groupCommand = "addgroup" - } else if _, err := resolveBinary("groupadd"); err == nil { - groupCommand = "groupadd" - } -} - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -// This new user's /etc/sub{uid,gid} ranges will be used for user namespace -// mapping ranges in containers. 
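// A standalone sketch of the binary-resolution check in resolveBinary above:
// LookPath, then EvalSymlinks, then confirm the resolved basename matches.
package main

import (
	"fmt"
	"os/exec"
	"path/filepath"
)

func resolveBinary(name string) (string, error) {
	p, err := exec.LookPath(name)
	if err != nil {
		return "", err
	}
	resolved, err := filepath.EvalSymlinks(p)
	if err != nil {
		return "", err
	}
	if filepath.Base(resolved) != name {
		return "", fmt.Errorf("%q resolves to %q, not a binary of that name", name, resolved)
	}
	return resolved, nil
}

func main() {
	fmt.Println(resolveBinary("sh"))
}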
-func AddNamespaceRangesUser(name string) (int, int, error) { - // Find unused uid, gid pair - uid, err := findUnusedUID(baseUID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err) - } - gid, err := findUnusedGID(baseGID) - if err != nil { - return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err) - } - - // First add the group that we will use - if err := addGroup(name, gid); err != nil { - return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err) - } - // Add the user as a member of the group - if err := addUser(name, uid, name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - return uid, gid, nil -} - -func addUser(userName string, uid int, groupName string) error { - - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName) - return execAddCmd(userCommand, args) -} - -func addGroup(groupName string, gid int) error { - - if groupCommand == "" { - return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found") - } - args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName) - // only error out if the error isn't that the group already exists - // if the group exists then our needs are already met - if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") { - return err - } - return nil -} - -func execAddCmd(cmd, args string) error { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - out, err := execCmd.CombinedOutput() - if err != nil { - return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out)) - } - return nil -} - -func findUnusedUID(startUID int) (int, error) { - return findUnused("passwd", startUID) -} - -func findUnusedGID(startGID int) (int, error) { - return findUnused("group", startGID) -} - -func findUnused(file string, id int) (int, error) { - for { - cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id) - cmd := exec.Command("sh", "-c", cmdStr) - if err := cmd.Run(); err != nil { - // if a non-zero return code occurs, then we know the ID was not found - // and is usable - if exiterr, ok := err.(*exec.ExitError); ok { - // The program has exited with an exit code != 0 - if status, ok := exiterr.Sys().(syscall.WaitStatus); ok { - if status.ExitStatus() == 1 { - //no match, we can use this ID - return id, nil - } - } - } - return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err) - } - id++ - if id > idMAX { - return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file) - } - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cb..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. 
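// A sketch of the exit-status probe findUnused relies on: grep exits 1 when
// nothing matches, which signals the numeric ID is free. The helper name and
// file choice here are illustrative.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func idFree(file string, id int) (bool, error) {
	cmd := exec.Command("sh", "-c", fmt.Sprintf("cut -d: -f3 /etc/%s | grep -q '^%d$'", file, id))
	err := cmd.Run()
	if err == nil {
		return false, nil // a match: the ID is already in use
	}
	if ee, ok := err.(*exec.ExitError); ok {
		if ws, ok := ee.Sys().(syscall.WaitStatus); ok && ws.ExitStatus() == 1 {
			return true, nil // grep matched nothing: the ID is unused
		}
	}
	return false, err // any other failure is a real error
}

func main() {
	fmt.Println(idFree("passwd", 54321))
}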
-func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/bytespipe.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/bytespipe.go deleted file mode 100644 index e263c284f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,152 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -// ErrClosed is returned when Write is called on a closed BytesPipe. -var ErrClosed = errors.New("write to closed BytesPipe") - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf [][]byte // slice of byte-slices of buffered data - lastRead int // index in the first slice to a read point - bufLen int // length of data buffered over the slices - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe(buf []byte) *BytesPipe { - if cap(buf) == 0 { - buf = make([]byte, 0, 64) - } - bp := &BytesPipe{ - buf: [][]byte{buf[:0]}, - } - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - defer bp.mu.Unlock() - written := 0 - for { - if bp.closeErr != nil { - return written, ErrClosed - } - // write data to the last buffer - b := bp.buf[len(bp.buf)-1] - // copy data to the current empty allocated area - n := copy(b[len(b):cap(b)], p) - // increment buffered data length - bp.bufLen += n - // include written data in last buffer - bp.buf[len(bp.buf)-1] = b[:len(b)+n] - - written += n - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // block if too much data is still in the buffer - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - } - - // allocate slice that has twice the size of the last unless maximum reached - nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) - if nextCap > maxCap { - nextCap = maxCap - } - // add new byte slice to the buffers slice and continue writing - bp.buf = append(bp.buf, make([]byte, 0, nextCap)) - } - bp.wait.Broadcast() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. 
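// The read-at-most-once queue semantics described for BytesPipe, shown with
// the stdlib bytes.Buffer as an unbounded stand-in (BytesPipe adds blocking
// back-pressure and slice recycling on top of this behaviour).
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
)

func main() {
	var q bytes.Buffer
	q.WriteString("hello ")
	q.WriteString("world")

	head := make([]byte, 5)
	n, _ := q.Read(head)
	fmt.Printf("%q\n", head[:n]) // "hello" — consumed, gone from the queue

	rest, _ := ioutil.ReadAll(&q)
	fmt.Printf("%q\n", rest) // " world"
}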
-func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -func (bp *BytesPipe) len() int { - return bp.bufLen - bp.lastRead -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.len() == 0 { - if bp.closeErr != nil { - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.len() == 0 && bp.closeErr != nil { - return 0, bp.closeErr - } - } - for { - read := copy(p, bp.buf[0][bp.lastRead:]) - n += read - bp.lastRead += read - if bp.len() == 0 { - // we have read everything. reset to the beginning. - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = bp.buf[0][:0] - break - } - // break if everything was read - if len(p) == read { - break - } - // more buffered data and more asked. read from next slice. - p = p[read:] - bp.lastRead = 0 - bp.bufLen -= len(bp.buf[0]) - bp.buf[0] = nil // throw away old slice - bp.buf = bp.buf[1:] // switch to next - } - bp.wait.Broadcast() - return -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/fmt.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/multireader.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/multireader.go deleted file mode 100644 index 0d2d76b47..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return 
r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. 
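// Logical concatenation itself is available in the stdlib as io.MultiReader;
// what the removed MultiReadSeeker adds is Seek across the summed lengths.
// A quick sketch of the read side:
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	r := io.MultiReader(strings.NewReader("abc"), strings.NewReader("def"))
	all, _ := ioutil.ReadAll(r)
	fmt.Println(string(all)) // abcdef, EOF only after the last reader
}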
-func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/readers.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/readers.go deleted file mode 100644 index e73b02bbf..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/readers.go +++ /dev/null @@ -1,154 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - - "golang.org/x/net/context" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps a io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. 
It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler.go deleted file mode 100644 index 3c88f29e3..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !gccgo - -package ioutils - -func callSchedulerIfNecessary() { -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler_gccgo.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler_gccgo.go deleted file mode 100644 index c11d02b94..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/scheduler_gccgo.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build gccgo - -package ioutils - -import ( - "runtime" -) - -func callSchedulerIfNecessary() { - //allow or force Go scheduler to switch context, without explicitly - //forcing this will make it hang when using gccgo implementation - runtime.Gosched() -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21b..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_windows.go deleted file mode 100644 index c746bae5e..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/hyperhq/hypercli/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writeflusher.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writeflusher.go deleted file mode 100644 index 2b35a2666..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "net/http" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - mu sync.Mutex - w io.Writer - flusher http.Flusher - flushed bool - closed error - - // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a - // channel will allow one to properly order the operations. 
-} - -var errWriteFlusherClosed = errors.New("writeflusher: closed") - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.mu.Lock() - defer wf.mu.Unlock() - if wf.closed != nil { - return 0, wf.closed - } - - n, err = wf.w.Write(b) - wf.flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - wf.mu.Lock() - defer wf.mu.Unlock() - - wf.flush() -} - -// flush the stream immediately without taking a lock. Used internally. -func (wf *WriteFlusher) flush() { - if wf.closed != nil { - return - } - - wf.flushed = true - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - wf.mu.Lock() - defer wf.mu.Unlock() - - return wf.flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.mu.Lock() - defer wf.mu.Unlock() - - if wf.closed != nil { - return wf.closed - } - - wf.closed = errWriteFlusherClosed - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writers.go b/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writers.go deleted file mode 100644 index ccc7f9c23..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. 
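// A usage sketch for the WriteCounter idea: json.Encoder.Encode masks the
// byte count of its writes, so wrap the destination writer to observe it.
// This is a standalone re-creation, not the removed type.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
)

type writeCounter struct {
	count int64
	w     io.Writer
}

func (wc *writeCounter) Write(p []byte) (int, error) {
	n, err := wc.w.Write(p)
	wc.count += int64(n)
	return n, err
}

func main() {
	wc := &writeCounter{w: ioutil.Discard}
	if err := json.NewEncoder(wc).Encode(map[string]int{"a": 1}); err != nil {
		panic(err)
	}
	fmt.Println(wc.count) // bytes written, including Encode's trailing newline
}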
-func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog.go b/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 422e4bbd9..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,40 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 31b047e3e..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,180 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. 
-// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. -func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - 
buf.WriteByte('"') -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index ff7aaf16e..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,124 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. -func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/time_marshalling.go b/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 211733814..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. -package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. 
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/hyperhq/hypercli/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index ed5b4ea43..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,221 +0,0 @@ -package jsonmessage - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/go-units" - "github.com/hyperhq/hypercli/pkg/jsonlog" - "github.com/hyperhq/hypercli/pkg/term" -) - -// JSONError wraps a concrete Code and Message, `Code` is -// is a integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. -type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. 
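// Why the fixed-width layout matters: time.RFC3339Nano trims trailing zeros,
// so timestamp columns drift; the padded RFC3339NanoFixed layout above keeps
// them aligned.
package main

import (
	"fmt"
	"time"
)

const rfc3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

func main() {
	t := time.Date(2019, 2, 28, 23, 4, 13, 500000000, time.UTC)
	fmt.Println(t.Format(time.RFC3339Nano)) // 2019-02-28T23:04:13.5Z
	fmt.Println(t.Format(rfc3339NanoFixed)) // 2019-02-28T23:04:13.500000000Z
}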
-type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) - endl = "\r" - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm.Aux) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } else { - diff = len(ids) - line - } - if isTerminal { - // NOTE: this appears to be necessary even if - // diff == 0. - // [{diff}A = move cursor up diff rows - fmt.Fprintf(out, "%c[%dA", 27, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. 
We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]int) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - // NOTE: this appears to be necessary even if - // diff == 0. - // [{diff}B = move cursor down diff rows - fmt.Fprintf(out, "%c[%dB", 27, diff) - } - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/longpath/longpath.go b/vendor/github.com/hyperhq/hypercli/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/mflag/LICENSE b/vendor/github.com/hyperhq/hypercli/pkg/mflag/LICENSE deleted file mode 100644 index 9b4f4a294..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/mflag/flag.go b/vendor/github.com/hyperhq/hypercli/pkg/mflag/flag.go deleted file mode 100644 index 2aad6ec5c..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/mflag/flag.go +++ /dev/null @@ -1,1280 +0,0 @@ -// Copyright 2014-2016 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mflag implements command-line flag parsing. -// -// Usage: -// -// Define flags using flag.String(), Bool(), Int(), etc. -// -// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. -// import flag "github.com/hyperhq/hypercli/pkg/mflag" -// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") -// If you like, you can bind the flag to a variable using the Var() functions. -// var flagvar int -// func init() { -// // -flaghidden will work, but will be hidden from the usage -// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") -// } -// Or you can create custom flags that satisfy the Value interface (with -// pointer receivers) and couple them to flag parsing by -// flag.Var(&flagVal, []string{"name"}, "help message for flagname") -// For such flags, the default value is just the initial value of the variable. -// -// You can also add "deprecated" flags; they are still usable, but are not shown -// in the usage and will display a warning when you try to use them. `#` before -// an option means this option is deprecated; if there is a following option -// without `#` ahead, then that's the replacement; if not, it will just be removed: -// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") -// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or -// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` -// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") -// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` -// so you can only use `-f`. -// -// You can also group one-letter flags; if you declare -// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") -// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") -// you will be able to use -vs or -sv -// -// After all flags are defined, call -// flag.Parse() -// to parse the command line into the defined flags. -// -// Flags may then be used directly. If you're using the flags themselves, -// they are all pointers; if you bind to variables, they're values. -// fmt.Println("ip has value ", *ip) -// fmt.Println("flagvar has value ", flagvar) -// -// After parsing, the arguments after the flag are available as the -// slice flag.Args() or individually as flag.Arg(i). -// The arguments are indexed from 0 through flag.NArg()-1. -// -// Command line flag syntax: -// -flag -// -flag=x -// -flag="x" -// -flag='x' -// -flag x // non-boolean flags only -// One or two minus signs may be used; they are equivalent. -// The last form is not permitted for boolean flags because the -// meaning of the command -// cmd -x * -// will change if there is a file called 0, false, etc. You must -// use the -flag=false form to turn off a boolean flag.
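A compact sketch of the declaration styles described in the package comment above (flag names are hypothetical; the package itself is removed by this patch):

package main

import (
	"fmt"

	flag "github.com/hyperhq/hypercli/pkg/mflag"
)

func main() {
	// "f" and "-flagname" name one flag, reachable as -f or --flagname;
	// "#old" is a deprecated alias that still parses but prints a warning
	// pointing at --flagname.
	ip := flag.Int([]string{"f", "#old", "-flagname"}, 1234, "help message for flagname")
	// One-letter booleans can be grouped on the command line: -vs == -v -s.
	v := flag.Bool([]string{"v", "-verbose"}, false, "verbose output")
	s := flag.Bool([]string{"s", "-slow"}, false, "slow mode")
	flag.Parse()
	fmt.Println(*ip, *v, *s)
}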
-// -// Flag parsing stops just before the first non-flag argument -// ("-" is a non-flag argument) or after the terminator "--". -// -// Integer flags accept 1234, 0664, 0x1234 and may be negative. -// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. -// Duration flags accept any input valid for time.ParseDuration. -// -// The default set of command-line flags is controlled by -// top-level functions. The FlagSet type allows one to define -// independent sets of flags, such as to implement subcommands -// in a command-line interface. The methods of FlagSet are -// analogous to the top-level functions for the command-line -// flag set. - -package mflag - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/hyperhq/hypercli/pkg/homedir" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("flag: help requested") - -// ErrRetry is the error returned if you need to try letter by letter -var ErrRetry = errors.New("flag: retry") - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Get() interface{} { return bool(*b) } - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Get() interface{} { return int(*i) } - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Get() interface{} { return int64(*i) } - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Get() interface{} { return uint(*i) } - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Get() interface{} { return uint64(*i) } - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint16 Value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - 
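The wrapper types above all repeat one pattern; as an illustration, a hypothetical custom Value for comma-separated lists (not part of the original file; it reuses the package's existing "strings" import) would follow it like this:

// listValue is a hypothetical custom Value: a comma-separated string list.
type listValue []string

func newListValue(val []string, p *[]string) *listValue {
	*p = val
	return (*listValue)(p)
}

func (l *listValue) Set(s string) error {
	*l = listValue(strings.Split(s, ","))
	return nil
}

func (l *listValue) Get() interface{} { return []string(*l) }

func (l *listValue) String() string { return strings.Join(*l, ",") }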
-func (i *uint16Value) Get() interface{} { return uint16(*i) } - -func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} - -func (s *stringValue) Get() interface{} { return string(*s) } - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Get() interface{} { return float64(*f) } - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Get() interface{} { return time.Duration(*d) } - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -// -// If a Value has an IsBoolFlag() bool method returning true, -// the command-line parser makes -name equivalent to -name=true -// rather than using the next command-line argument. -type Value interface { - String() string - Set(string) error -} - -// Getter is an interface that allows the contents of a Value to be retrieved. -// It wraps the Value interface, rather than being part of it, because it -// appeared after Go 1 and its compatibility rules. All Value types provided -// by this package satisfy the Getter interface. -type Getter interface { - Value - Get() interface{} -} - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -// ErrorHandling strategies available when a flag parsing error occurs -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// A FlagSet represents a set of defined flags. The zero value of a FlagSet -// has no name and has ContinueOnError error handling. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - ShortUsage func() - - name string - parsed bool - actual map[string]*Flag - formal map[string]*Flag - args []string // arguments after flags - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Out() accessor - nArgRequirements []nArgRequirement -} - -// A Flag represents the state of a flag. 
-type Flag struct { - Names []string // name as it appears on command line - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message -} - -type flagSlice []string - -func (p flagSlice) Len() int { return len(p) } -func (p flagSlice) Less(i, j int) bool { - pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") - lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) - if lpi != lpj { - return lpi < lpj - } - return pi < pj -} -func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[string]*Flag) []*Flag { - var list flagSlice - - // The sorted list is based on the first name, when flag map might use the other names. - nameMap := make(map[string]string) - - for n, f := range flags { - fName := strings.TrimPrefix(f.Names[0], "#") - nameMap[fName] = n - if len(f.Names) == 1 { - list = append(list, fName) - continue - } - - found := false - for _, name := range list { - if name == fName { - found = true - break - } - } - if !found { - list = append(list, fName) - } - } - sort.Sort(list) - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[nameMap[name]] - } - return result -} - -// Name returns the name of the FlagSet. -func (fs *FlagSet) Name() string { - return fs.name -} - -// Out returns the destination for usage and error messages. -func (fs *FlagSet) Out() io.Writer { - if fs.output == nil { - return os.Stderr - } - return fs.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (fs *FlagSet) SetOutput(output io.Writer) { - fs.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. -// It visits all flags, even those not set. -func (fs *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(fs.formal) { - fn(flag) - } -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (fs *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(fs.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (fs *FlagSet) Lookup(name string) *Flag { - return fs.formal[name] -} - -// IsSet indicates whether the specified flag is set in the given FlagSet -func (fs *FlagSet) IsSet(name string) bool { - return fs.actual[name] != nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.formal[name] -} - -// IsSet indicates whether the specified flag was specified at all on the cmd line. 
-func IsSet(name string) bool { - return CommandLine.IsSet(name) -} - -type nArgRequirementType int - -// Indicator used to pass to BadArgs function -const ( - Exact nArgRequirementType = iota - Max - Min -) - -type nArgRequirement struct { - Type nArgRequirementType - N int -} - -// Require adds a requirement about the number of arguments for the FlagSet. -// The first parameter can be Exact, Max, or Min to respectively specify the exact, -// the maximum, or the minimal number of arguments required. -// The actual check is done in FlagSet.CheckArgs(). -func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) -} - -// CheckArgs uses the requirements set by FlagSet.Require() to validate -// the number of arguments. If the requirements are not met, -// an error message string is returned. -func (fs *FlagSet) CheckArgs() (message string) { - for _, req := range fs.nArgRequirements { - var arguments string - if req.N == 1 { - arguments = "1 argument" - } else { - arguments = fmt.Sprintf("%d arguments", req.N) - } - - str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) - } - - switch req.Type { - case Exact: - if fs.NArg() != req.N { - return str("") - } - case Max: - if fs.NArg() > req.N { - return str("a maximum of ") - } - case Min: - if fs.NArg() < req.N { - return str("a minimum of ") - } - } - } - return "" -} - -// Set sets the value of the named flag. -func (fs *FlagSet) Set(name, value string) error { - flag, ok := fs.formal[name] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if err := flag.Value.Set(value); err != nil { - return err - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// isZeroValue guesses whether the string represents the zero -// value for a flag. It is not accurate but in practice works OK. -func isZeroValue(value string) bool { - switch value { - case "false": - return true - case "": - return true - case "0": - return true - } - return false -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (fs *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) - home := homedir.Get() - - // Don't substitute when HOME is / - if runtime.GOOS != "windows" && home == "/" { - home = "" - } - - // Add a blank line between cmd description and list of options - if fs.FlagCount() > 0 { - fmt.Fprintln(writer, "") - } - - fs.VisitAll(func(flag *Flag) { - names := []string{} - for _, name := range flag.Names { - if name[0] != '#' { - names = append(names, name) - } - } - if len(names) > 0 && len(flag.Usage) > 0 { - val := flag.DefValue - - if home != "" && strings.HasPrefix(val, home) { - val = homedir.GetShortcutString() + val[len(home):] - } - - if isZeroValue(val) { - format := " -%s" - fmt.Fprintf(writer, format, strings.Join(names, ", -")) - } else { - format := " -%s=%s" - fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) - } - for _, line := range strings.Split(flag.Usage, "\n") { - fmt.Fprintln(writer, "\t", line) - } - } - }) - writer.Flush() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. 
-func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(fs *FlagSet) { - if fs.name == "" { - fmt.Fprintf(fs.Out(), "Usage:\n") - } else { - fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) - } - fs.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// ShortUsage prints to standard error a usage message documenting the standard command layout -// The function is a variable that may be changed to point to a custom function. -var ShortUsage = func() { - fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) -} - -// FlagCount returns the number of flags that have been defined. -func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } - -// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (fs *FlagSet) FlagCountUndeprecated() int { - count := 0 - for _, flag := range sortFlags(fs.formal) { - for _, name := range flag.Names { - if name[0] != '#' { - count++ - break - } - } - } - return count -} - -// NFlag returns the number of flags that have been set. -func (fs *FlagSet) NFlag() int { return len(fs.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (fs *FlagSet) Arg(i int) string { - if i < 0 || i >= len(fs.args) { - return "" - } - return fs.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (fs *FlagSet) NArg() int { return len(fs.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (fs *FlagSet) Args() []string { return fs.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - fs.Var(newBoolValue(value, p), names, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, names []string, value bool, usage string) { - CommandLine.Var(newBoolValue(value, p), names, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. 
-func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { - p := new(bool) - fs.BoolVar(p, names, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(names []string, value bool, usage string) *bool { - return CommandLine.Bool(names, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { - fs.Var(newIntValue(value, p), names, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, names []string, value int, usage string) { - CommandLine.Var(newIntValue(value, p), names, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (fs *FlagSet) Int(names []string, value int, usage string) *int { - p := new(int) - fs.IntVar(p, names, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(names []string, value int, usage string) *int { - return CommandLine.Int(names, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - fs.Var(newInt64Value(value, p), names, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, names []string, value int64, usage string) { - CommandLine.Var(newInt64Value(value, p), names, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { - p := new(int64) - fs.Int64Var(p, names, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(names []string, value int64, usage string) *int64 { - return CommandLine.Int64(names, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - fs.Var(newUintValue(value, p), names, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, names []string, value uint, usage string) { - CommandLine.Var(newUintValue(value, p), names, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. 
-// The return value is the address of a uint variable that stores the value of the flag. -func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { - p := new(uint) - fs.UintVar(p, names, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(names []string, value uint, usage string) *uint { - return CommandLine.Uint(names, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - fs.Var(newUint64Value(value, p), names, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, names []string, value uint64, usage string) { - CommandLine.Var(newUint64Value(value, p), names, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { - p := new(uint64) - fs.Uint64Var(p, names, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(names []string, value uint64, usage string) *uint64 { - return CommandLine.Uint64(names, value, usage) -} - -// Uint16Var defines a uint16 flag with specified name, default value, and usage string. -// The argument p points to a uint16 variable in which to store the value of the flag. -func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { - fs.Var(newUint16Value(value, p), names, usage) -} - -// Uint16Var defines a uint16 flag with specified name, default value, and usage string. -// The argument p points to a uint16 variable in which to store the value of the flag. -func Uint16Var(p *uint16, names []string, value uint16, usage string) { - CommandLine.Var(newUint16Value(value, p), names, usage) -} - -// Uint16 defines a uint16 flag with specified name, default value, and usage string. -// The return value is the address of a uint16 variable that stores the value of the flag. -func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { - p := new(uint16) - fs.Uint16Var(p, names, value, usage) - return p -} - -// Uint16 defines a uint16 flag with specified name, default value, and usage string. -// The return value is the address of a uint16 variable that stores the value of the flag. -func Uint16(names []string, value uint16, usage string) *uint16 { - return CommandLine.Uint16(names, value, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { - fs.Var(newStringValue(value, p), names, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. 
-func StringVar(p *string, names []string, value string, usage string) { - CommandLine.Var(newStringValue(value, p), names, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (fs *FlagSet) String(names []string, value string, usage string) *string { - p := new(string) - fs.StringVar(p, names, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(names []string, value string, usage string) *string { - return CommandLine.String(names, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - fs.Var(newFloat64Value(value, p), names, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, names []string, value float64, usage string) { - CommandLine.Var(newFloat64Value(value, p), names, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { - p := new(float64) - fs.Float64Var(p, names, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(names []string, value float64, usage string) *float64 { - return CommandLine.Float64(names, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - fs.Var(newDurationValue(value, p), names, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - CommandLine.Var(newDurationValue(value, p), names, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - fs.DurationVar(p, names, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(names []string, value time.Duration, usage string) *time.Duration { - return CommandLine.Duration(names, value, usage) -} - -// Var defines a flag with the specified name and usage string. 
The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (fs *FlagSet) Var(value Value, names []string, usage string) { - // Remember the default value as a string; it won't change. - flag := &Flag{names, usage, value, value.String()} - for _, name := range names { - name = strings.TrimPrefix(name, "#") - _, alreadythere := fs.formal[name] - if alreadythere { - var msg string - if fs.name == "" { - msg = fmt.Sprintf("flag redefined: %s", name) - } else { - msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) - } - fmt.Fprintln(fs.Out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if fs.formal == nil { - fs.formal = make(map[string]*Flag) - } - fs.formal[name] = flag - } -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, names []string, usage string) { - CommandLine.Var(value, names, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (fs *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(fs.Out(), err) - if os.Args[0] == fs.name { - fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) - } else { - fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (fs *FlagSet) usage() { - if fs == CommandLine { - Usage() - } else if fs.Usage == nil { - defaultUsage(fs) - } else { - fs.Usage() - } -} - -func trimQuotes(str string) string { - if len(str) == 0 { - return str - } - type quote struct { - start, end byte - } - - // All valid quote types. - quotes := []quote{ - // Double quotes - { - start: '"', - end: '"', - }, - - // Single quotes - { - start: '\'', - end: '\'', - }, - } - - for _, quote := range quotes { - // Only strip if outermost match. - if str[0] == quote.start && str[len(str)-1] == quote.end { - str = str[1 : len(str)-1] - break - } - } - - return str -} - -// parseOne parses one flag. It reports whether a flag was seen. -func (fs *FlagSet) parseOne() (bool, string, error) { - if len(fs.args) == 0 { - return false, "", nil - } - s := fs.args[0] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, "", nil - } - if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - fs.args = fs.args[1:] - return false, "", nil - } - name := s[1:] - if len(name) == 0 || name[0] == '=' { - return false, "", fs.failf("bad flag syntax: %s", s) - } - - // it's a flag. does it have an argument? 
- fs.args = fs.args[1:] - hasValue := false - value := "" - if i := strings.Index(name, "="); i != -1 { - value = trimQuotes(name[i+1:]) - hasValue = true - name = name[:i] - } - - m := fs.formal - flag, alreadythere := m[name] // BUG - if !alreadythere { - if name == "-help" || name == "help" || name == "h" { // special case for nice help message. - fs.usage() - return false, "", ErrHelp - } - if len(name) > 0 && name[0] == '-' { - return false, "", fs.failf("flag provided but not defined: -%s", name) - } - return false, name, ErrRetry - } - if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if hasValue { - if err := fv.Set(value); err != nil { - return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) - } - } else { - fv.Set("true") - } - } else { - // It must have a value, which might be the next argument. - if !hasValue && len(fs.args) > 0 { - // value is the next arg - hasValue = true - value, fs.args = fs.args[0], fs.args[1:] - } - if !hasValue { - return false, "", fs.failf("flag needs an argument: -%s", name) - } - if err := flag.Value.Set(value); err != nil { - return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) - } - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - for i, n := range flag.Names { - if n == fmt.Sprintf("#%s", name) { - replacement := "" - for j := i; j < len(flag.Names); j++ { - if flag.Names[j][0] != '#' { - replacement = flag.Names[j] - break - } - } - if replacement != "" { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) - } else { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) - } - } - } - return true, "", nil -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (fs *FlagSet) Parse(arguments []string) error { - fs.parsed = true - fs.args = arguments - for { - seen, name, err := fs.parseOne() - if seen { - continue - } - if err == nil { - break - } - if err == ErrRetry { - if len(name) > 1 { - err = nil - for _, letter := range strings.Split(name, "") { - fs.args = append([]string{"-" + letter}, fs.args...) - seen2, _, err2 := fs.parseOne() - if seen2 { - continue - } - if err2 != nil { - err = fs.failf("flag provided but not defined: -%s", name) - break - } - } - if err == nil { - continue - } - } else { - err = fs.failf("flag provided but not defined: -%s", name) - } - } - switch fs.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(125) - case PanicOnError: - panic(err) - } - } - return nil -} - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls fs.Parse(args), and prints a relevant error message if an -// incorrect number of arguments is given. It returns an error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value.
-func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { - var help *bool - if withHelp { - help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := fs.Parse(args); err != nil { - return err - } - if help != nil && *help { - fs.SetOutput(os.Stdout) - fs.Usage() - os.Exit(0) - } - if str := fs.CheckArgs(); str != "" { - fs.SetOutput(os.Stderr) - fs.ReportError(str, withHelp) - fs.ShortUsage() - os.Exit(1) - } - return nil -} - -// ReportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help. -func (fs *FlagSet) ReportError(str string, withHelp bool) { - if withHelp { - if os.Args[0] == fs.Name() { - str += ".\nSee '" + os.Args[0] + " --help'" - } else { - str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" - } - } - fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str) -} - -// Parsed reports whether fs.Parse has been called. -func (fs *FlagSet) Parsed() bool { - return fs.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and so on are wrappers for the -// methods of CommandLine. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - } - return f -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { - fs.name = name - fs.errorHandling = errorHandling -} - -type mergeVal struct { - Value - key string - fset *FlagSet -} - -func (v mergeVal) Set(s string) error { - return v.fset.Set(v.key, s) -} - -func (v mergeVal) IsBoolFlag() bool { - if b, ok := v.Value.(boolFlag); ok { - return b.IsBoolFlag() - } - return false -} - -// Name returns the name of a mergeVal. -// If the original value had a name, return the original name, -// otherwise, return the key assigned to this mergeVal. -func (v mergeVal) Name() string { - type namedValue interface { - Name() string - } - if nVal, ok := v.Value.(namedValue); ok { - return nVal.Name() - } - return v.key -} - -// Merge is a helper function that merges n FlagSets into a single dest FlagSet. -// In case of name collision between the flagsets it will apply -// the destination FlagSet's errorHandling behavior.
-func Merge(dest *FlagSet, flagsets ...*FlagSet) error { - for _, fset := range flagsets { - if fset.formal == nil { - continue - } - for k, f := range fset.formal { - if _, ok := dest.formal[k]; ok { - var err error - if fset.name == "" { - err = fmt.Errorf("flag redefined: %s", k) - } else { - err = fmt.Errorf("%s flag redefined: %s", fset.name, k) - } - fmt.Fprintln(fset.Out(), err.Error()) - // Happens only if flags are declared with identical names - switch dest.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - newF := *f - newF.Value = mergeVal{f.Value, k, fset} - if dest.formal == nil { - dest.formal = make(map[string]*Flag) - } - dest.formal[k] = &newF - } - } - return nil -} - -// IsEmpty reports if the FlagSet is actually empty. -func (fs *FlagSet) IsEmpty() bool { - return len(fs.actual) == 0 -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/plugins/client.go b/vendor/github.com/hyperhq/hypercli/pkg/plugins/client.go deleted file mode 100644 index ba7772dfb..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/plugins/client.go +++ /dev/null @@ -1,162 +0,0 @@ -package plugins - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - versionMimetype = "application/vnd.docker.plugins.v1.2+json" - defaultTimeOut = 30 -) - -// NewClient creates a new plugin client (http). -func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) { - tr := &http.Transport{} - - c, err := tlsconfig.Client(tlsConfig) - if err != nil { - return nil, err - } - tr.TLSClientConfig = c - - protoAndAddr := strings.Split(addr, "://") - sockets.ConfigureTCPTransport(tr, protoAndAddr[0], protoAndAddr[1]) - - scheme := protoAndAddr[0] - if scheme != "https" { - scheme = "http" - } - return &Client{&http.Client{Transport: tr}, scheme, protoAndAddr[1]}, nil -} - -// Client represents a plugin client. -type Client struct { - http *http.Client // http client to use - scheme string // scheme protocol of the plugin - addr string // http address of the plugin -} - -// Call calls the specified method with the specified arguments for the plugin. -// It will retry for 30 seconds if a failure occurs when calling. 
-func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { - var buf bytes.Buffer - if args != nil { - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return err - } - } - body, err := c.callWithRetry(serviceMethod, &buf, true) - if err != nil { - return err - } - defer body.Close() - if ret != nil { - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - } - return nil -} - -// Stream calls the specified method with the specified arguments for the plugin and returns the response body. -func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return nil, err - } - return c.callWithRetry(serviceMethod, &buf, true) -} - -// SendFile calls the specified method, and passes through the IO stream. -func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { - body, err := c.callWithRetry(serviceMethod, data, true) - if err != nil { - return err - } - defer body.Close() - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - return nil -} - -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := http.NewRequest("POST", "/"+serviceMethod, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", versionMimetype) - req.URL.Scheme = c.scheme - req.URL.Host = c.addr - - var retries int - start := time.Now() - - for { - resp, err := c.http.Do(req) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to connect to plugin: %s, retrying in %v", c.addr, timeOff) - time.Sleep(timeOff) - continue - } - - if resp.StatusCode != http.StatusOK { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} - } - - // Plugins' Response(s) should have an Err field indicating what went - // wrong. Try to unmarshal into responseErr. Otherwise fall back to just - // returning string(body). - type responseErr struct { - Err string - } - remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err == nil { - if remoteErr.Err != "" { - return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} - } - } - // old way...
- return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} - } - return resp.Body, nil - } -} - -func backoff(retries int) time.Duration { - b, max := 1, defaultTimeOut - for b < max && retries > 0 { - b *= 2 - retries-- - } - if b > max { - b = max - } - return time.Duration(b) * time.Second -} - -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/plugins/discovery.go b/vendor/github.com/hyperhq/hypercli/pkg/plugins/discovery.go deleted file mode 100644 index 3f7966178..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/plugins/discovery.go +++ /dev/null @@ -1,130 +0,0 @@ -package plugins - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" -) - -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/docker/plugins" - specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} -) - -// localRegistry defines a registry that is local (using unix socket). -type localRegistry struct{} - -func newLocalRegistry() localRegistry { - return localRegistry{} -} - -// Scan scans all the plugin paths and returns all the names it found -func Scan() ([]string, error) { - var names []string - if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return nil - } - - if fi.Mode()&os.ModeSocket != 0 { - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - } - return nil - }); err != nil { - return nil, err - } - - for _, path := range specsPaths { - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return nil - } - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - return nil - }); err != nil { - return nil, err - } - } - return names, nil -} - -// Plugin returns the plugin registered with the given name (or returns an error). -func (l *localRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { - if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { - return newLocalPlugin(name, "unix://"+p), nil - } - } - - var txtspecpaths []string - for _, p := range specsPaths { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
- } - - for _, p := range txtspecpaths { - if _, err := os.Stat(p); err == nil { - if strings.HasSuffix(p, ".json") { - return readPluginJSONInfo(name, p) - } - return readPluginInfo(name, p) - } - } - return nil, ErrNotFound -} - -func readPluginInfo(name, path string) (*Plugin, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - addr := strings.TrimSpace(string(content)) - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - if len(u.Scheme) == 0 { - return nil, fmt.Errorf("Unknown protocol") - } - - return newLocalPlugin(name, addr), nil -} - -func readPluginJSONInfo(name, path string) (*Plugin, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var p Plugin - if err := json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - p.Name = name - if len(p.TLSConfig.CAFile) == 0 { - p.TLSConfig.InsecureSkipVerify = true - } - - return &p, nil -} - -func pluginPaths(base, name, ext string) []string { - return []string{ - filepath.Join(base, name+ext), - filepath.Join(base, name, name+ext), - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/plugins/errors.go b/vendor/github.com/hyperhq/hypercli/pkg/plugins/errors.go deleted file mode 100644 index a1826c890..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/plugins/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -package plugins - -import ( - "fmt" - "net/http" -) - -type statusError struct { - status int - method string - err string -} - -// Error returns a formatted string for this error type -func (e *statusError) Error() string { - return fmt.Sprintf("%s: %v", e.method, e.err) -} - -// IsNotFound indicates whether the passed-in error comes from an http.StatusNotFound response from the plugin -func IsNotFound(err error) bool { - return isStatusError(err, http.StatusNotFound) -} - -func isStatusError(err error, status int) bool { - if err == nil { - return false - } - e, ok := err.(*statusError) - if !ok { - return false - } - return e.status == status -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/plugins/plugins.go b/vendor/github.com/hyperhq/hypercli/pkg/plugins/plugins.go deleted file mode 100644 index 7157107ba..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/plugins/plugins.go +++ /dev/null @@ -1,222 +0,0 @@ -// Package plugins provides structures and helper functions to manage Docker -// plugins. -// -// Docker discovers plugins by looking for them in the plugin directory whenever -// a user or container tries to use one by name. UNIX domain socket files must -// be located under /run/docker/plugins, whereas spec files can be located -// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled -// by the Registry interface, which lets you list all plugins or get a plugin by -// its name if it exists. -// -// The plugins need to implement an HTTP server and bind this to the UNIX socket -// or the address specified in the spec files. -// A handshake is sent at /Plugin.Activate, and plugins are expected to return -// a Manifest with a list of Docker subsystems which this plugin implements. -// -// In order to use a plugin, you can use ``Get`` with the name of the -// plugin and the subsystem it implements.
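For reference, sketches of the two spec-file formats the discovery code above reads (plugin name, addresses, and certificate path are hypothetical):

/etc/docker/plugins/example.spec, plain text holding just the address:

    tcp://127.0.0.1:8080

/etc/docker/plugins/example.json, JSON decoded into the Plugin struct:

    {"Addr": "https://127.0.0.1:8080", "TLSConfig": {"CAFile": "/etc/certs/ca.pem"}}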
-// -// plugin, err := plugins.Get("example", "VolumeDriver") -// if err != nil { -// return fmt.Errorf("Error looking up volume plugin example: %v", err) -// } -package plugins - -import ( - "errors" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. - ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) - -type plugins struct { - sync.Mutex - plugins map[string]*Plugin -} - -var ( - storage = plugins{plugins: make(map[string]*Plugin)} - extpointHandlers = make(map[string]func(string, *Client)) -) - -// Manifest lists what a plugin implements. -type Manifest struct { - // List of subsystem the plugin implements. - Implements []string -} - -// Plugin is the definition of a docker plugin. -type Plugin struct { - // Name of the plugin - Name string `json:"-"` - // Address of the plugin - Addr string - // TLS configuration of the plugin - TLSConfig tlsconfig.Options - // Client attached to the plugin - Client *Client `json:"-"` - // Manifest of the plugin (see above) - Manifest *Manifest `json:"-"` - - activatErr error - activateOnce sync.Once -} - -func newLocalPlugin(name, addr string) *Plugin { - return &Plugin{ - Name: name, - Addr: addr, - TLSConfig: tlsconfig.Options{InsecureSkipVerify: true}, - } -} - -func (p *Plugin) activate() error { - p.activateOnce.Do(func() { - p.activatErr = p.activateWithLock() - }) - return p.activatErr -} - -func (p *Plugin) activateWithLock() error { - c, err := NewClient(p.Addr, p.TLSConfig) - if err != nil { - return err - } - p.Client = c - - m := new(Manifest) - if err = p.Client.Call("Plugin.Activate", nil, m); err != nil { - return err - } - - p.Manifest = m - - for _, iface := range m.Implements { - handler, handled := extpointHandlers[iface] - if !handled { - continue - } - handler(p.Name, p.Client) - } - return nil -} - -func (p *Plugin) implements(kind string) bool { - for _, driver := range p.Manifest.Implements { - if driver == kind { - return true - } - } - return false -} - -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - -func loadWithRetry(name string, retry bool) (*Plugin, error) { - registry := newLocalRegistry() - start := time.Now() - - var retries int - for { - pl, err := registry.Plugin(name) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) - time.Sleep(timeOff) - continue - } - - storage.Lock() - storage.plugins[name] = pl - storage.Unlock() - - err = pl.activate() - - if err != nil { - storage.Lock() - delete(storage.plugins, name) - storage.Unlock() - } - - return pl, err - } -} - -func get(name string) (*Plugin, error) { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - return pl, pl.activate() - } - return load(name) -} - -// Get returns the plugin given the specified name and requested implementation. -func Get(name, imp string) (*Plugin, error) { - pl, err := get(name) - if err != nil { - return nil, err - } - if pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) - return pl, nil - } - return nil, ErrNotImplements -} - -// Handle adds the specified function to the extpointHandlers. 
-func Handle(iface string, fn func(string, *Client)) { - extpointHandlers[iface] = fn -} - -// GetAll returns all the plugins for the specified implementation. -func GetAll(imp string) ([]*Plugin, error) { - pluginNames, err := Scan() - if err != nil { - return nil, err - } - - type plLoad struct { - pl *Plugin - err error - } - - chPl := make(chan plLoad, len(pluginNames)) - for _, name := range pluginNames { - go func(name string) { - pl, err := loadWithRetry(name, false) - chPl <- plLoad{pl, err} - }(name) - } - - var out []*Plugin - for i := 0; i < len(pluginNames); i++ { - pl := <-chPl - if pl.err != nil { - logrus.Error(pl.err) - continue - } - if pl.pl.implements(imp) { - out = append(out, pl.pl) - } - } - return out, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/pools/pools.go b/vendor/github.com/hyperhq/hypercli/pkg/pools/pools.go deleted file mode 100644 index 5c2c1271d..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/hyperhq/hypercli/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool.
-type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/promise/promise.go b/vendor/github.com/hyperhq/hypercli/pkg/promise/promise.go deleted file mode 100644 index dd52b9082..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/random/random.go b/vendor/github.com/hyperhq/hypercli/pkg/random/random.go deleted file mode 100644 index 70de4d130..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. 
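- // (crypto/rand errors here only if the OS entropy source cannot be read.)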
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_freebsd.go b/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_freebsd.go deleted file mode 100644 index c7f797a5f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_freebsd.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_linux.go deleted file mode 100644 index 3c3a73a9d..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package reexec - -import ( - "os/exec" - "syscall" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which have Path as current binary. Also it setting -// SysProcAttr.Pdeathsig to SIGTERM. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, - }, - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_unsupported.go deleted file mode 100644 index ad4ea38eb..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux and Windows. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_windows.go deleted file mode 100644 index 8d65e0ae1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". 
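-// As with os/exec generally, args should include the command name itself
-// as args[0].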
-func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/reexec/reexec.go b/vendor/github.com/hyperhq/hypercli/pkg/reexec/reexec.go deleted file mode 100644 index ceb98d25f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registred under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/selfupdate/LICENSE.md b/vendor/github.com/hyperhq/hypercli/pkg/selfupdate/LICENSE.md deleted file mode 100644 index 959266d5f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/selfupdate/LICENSE.md +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Mark Sanborn - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/hyperhq/hypercli/pkg/stringid/stringid.go b/vendor/github.com/hyperhq/hypercli/pkg/stringid/stringid.go deleted file mode 100644 index 90fcb6d51..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/stringid/stringid.go +++ /dev/null @@ -1,71 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid - -import ( - "crypto/rand" - "encoding/hex" - "io" - "regexp" - "strconv" - "strings" - - "github.com/hyperhq/hypercli/pkg/random" -) - -const shortLen = 12 - -var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") - -// IsShortID determines if an arbitrary string *looks like* a short ID. 
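-// For example, "90fcb6d51012" looks like one, while "123" (too short) and
-// "90FCB6D51012" (uppercase) do not.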
-func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a langer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - trimTo := shortLen - if len(id) < shortLen { - trimTo = len(id) - } - return id[:trimTo] -} - -func generateID(crypto bool) string { - b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } - for { - if _, err := io.ReadFull(r, b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// GenerateRandomID returns an unique id. -func GenerateRandomID() string { - return generateID(true) - -} - -// GenerateNonCryptoID generates unique id without using cryptographically -// secure sources of random. -// It helps you to save entropy. -func GenerateNonCryptoID() string { - return generateID(false) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.APACHE b/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 34c4ea7c5..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.BSD b/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.BSD deleted file mode 100644 index 9b4f4a294..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/chtimes.go b/vendor/github.com/hyperhq/hypercli/pkg/system/chtimes.go deleted file mode 100644 index acf3f566f..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/chtimes.go +++ /dev/null @@ -1,47 +0,0 @@ -package system - -import ( - "os" - "syscall" - "time" - "unsafe" -) - -var ( - maxTime time.Time -) - -func init() { - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/errors.go b/vendor/github.com/hyperhq/hypercli/pkg/system/errors.go deleted file mode 100644 index 288318985..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/events_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/events_windows.go deleted file mode 100644 index 04e2de787..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
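-// The eventAttributes pointer may be nil, in which case (as in the Win32
-// API) the event receives a default security descriptor.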
-func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/filesys.go b/vendor/github.com/hyperhq/hypercli/pkg/system/filesys.go deleted file mode 100644 index c14feb849..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/filesys.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "os" - "path/filepath" -) - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/filesys_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/filesys_windows.go deleted file mode 100644 index 16823d551..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/filesys_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strings" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
- dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/lstat.go b/vendor/github.com/hyperhq/hypercli/pkg/system/lstat.go deleted file mode 100644 index bd23c4d50..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/lstat_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/lstat_windows.go deleted file mode 100644 index 49e87eb40..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/lstat_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &StatT{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo.go b/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. 
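- // (On Linux, the MemFree field of /proc/meminfo.)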
- MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_linux.go deleted file mode 100644 index 66731a960..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given a io.Reader to the file. -// -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. - if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 82ddd30c1..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_windows.go deleted file mode 100644 index d46642598..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
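-// On Windows it is backed by the GlobalMemoryStatusEx syscall.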
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/mknod.go b/vendor/github.com/hyperhq/hypercli/pkg/system/mknod.go deleted file mode 100644 index 73958182b..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/mknod_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c021..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/path_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/system/path_unix.go deleted file mode 100644 index 1b6cc9cbd..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/path_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/path_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/path_windows.go deleted file mode 100644 index 09e7f89fe..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/path_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package system - -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat.go deleted file mode 100644 index 087034c5e..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. 
It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// GetLastModification returns file's last modification time. -func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_freebsd.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f151..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded13..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_solaris.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat_solaris.go deleted file mode 100644 index b01d08acf..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_solaris.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build solaris - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat_unsupported.go deleted file mode 100644 index c6075d4ff..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/stat_windows.go deleted file mode 100644 index 39490c625..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_unix.go b/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_unix.go deleted file mode 100644 index f1497c587..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. 
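-// It passes flags=0, i.e. a plain (non-lazy, non-forced) unmount.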
-func Unmount(dest string) error { - return syscall.Unmount(dest, 0) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_windows.go deleted file mode 100644 index 273aa234b..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/syscall_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -package system - -import ( - "fmt" - "syscall" -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() (OSVersion, error) { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - return osv, fmt.Errorf("Failed to call GetVersion()") - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv, nil -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/umask.go b/vendor/github.com/hyperhq/hypercli/pkg/system/umask.go deleted file mode 100644 index c670fcd75..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Umask sets current process's file mode creation mask to newmask -// and return oldmask. -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/umask_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de176..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_darwin.go b/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_darwin.go deleted file mode 100644 index 0a1619754..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import "syscall" - -// LUtimesNano is not supported by darwin platform. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_freebsd.go b/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_freebsd.go deleted file mode 100644 index e2eac3b55..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
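-// By convention ts holds two values: the access time followed by the
-// modification time, as in utimensat(2).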
-func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_linux.go deleted file mode 100644 index fc8a1aba9..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - atFdCwd := -100 - atSymLinkNoFollow := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_unsupported.go deleted file mode 100644 index 50c3a0436..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!freebsd,!darwin - -package system - -import "syscall" - -// LUtimesNano is not supported on platforms other than linux, freebsd and darwin. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_linux.go deleted file mode 100644 index d2e2c0579..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,63 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// It will returns a nil slice and nil error if the xattr is not set. 
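-// An illustrative call (the path and attribute name are hypothetical):
-//
-// value, err := Lgetxattr("/some/path", "security.capability")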
-func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_unsupported.go b/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0114f2227..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package system - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/builder_context.go b/vendor/github.com/hyperhq/hypercli/pkg/tarsum/builder_context.go deleted file mode 100644 index b42983e98..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,21 +0,0 @@ -package tarsum - -// BuilderContext is an interface extending TarSum by adding the Remove method. -// In general there was concern about adding this method to TarSum itself -// so instead it is being added just to "BuilderContext" which will then -// only be used during the .dockerignore file processing -// - see builder/evaluator.go -type BuilderContext interface { - TarSum - Remove(string) -} - -func (bc *tarSum) Remove(filename string) { - for i, fis := range bc.sums { - if fis.Name() == filename { - bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) 
- // Note, we don't just return because there could be - // more than one with this name - } - } -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/fileinfosums.go b/vendor/github.com/hyperhq/hypercli/pkg/tarsum/fileinfosums.go deleted file mode 100644 index 5abf5e7ba..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/fileinfosums.go +++ /dev/null @@ -1,126 +0,0 @@ -package tarsum - -import "sort" - -// FileInfoSumInterface provides an interface for accessing file checksum -// information within a tar file. This info is accessed through interface -// so the actual name and sum cannot be melded with. -type FileInfoSumInterface interface { - // File name - Name() string - // Checksum of this particular file and its headers - Sum() string - // Position of file in the tar - Pos() int64 -} - -type fileInfoSum struct { - name string - sum string - pos int64 -} - -func (fis fileInfoSum) Name() string { - return fis.name -} -func (fis fileInfoSum) Sum() string { - return fis.sum -} -func (fis fileInfoSum) Pos() int64 { - return fis.pos -} - -// FileInfoSums provides a list of FileInfoSumInterfaces. -type FileInfoSums []FileInfoSumInterface - -// GetFile returns the first FileInfoSumInterface with a matching name. -func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { - for i := range fis { - if fis[i].Name() == name { - return fis[i] - } - } - return nil -} - -// GetAllFile returns a FileInfoSums with all matching names. -func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { - f := FileInfoSums{} - for i := range fis { - if fis[i].Name() == name { - f = append(f, fis[i]) - } - } - return f -} - -// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. -func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { - seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. - for i := range fis { - f := fis[i] - if _, ok := seen[f.Name()]; ok { - dups = append(dups, f) - } else { - seen[f.Name()] = 0 - } - } - return dups -} - -// Len returns the size of the FileInfoSums. -func (fis FileInfoSums) Len() int { return len(fis) } - -// Swap swaps two FileInfoSum values if a FileInfoSums list. -func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } - -// SortByPos sorts FileInfoSums content by position. -func (fis FileInfoSums) SortByPos() { - sort.Sort(byPos{fis}) -} - -// SortByNames sorts FileInfoSums content by name. -func (fis FileInfoSums) SortByNames() { - sort.Sort(byName{fis}) -} - -// SortBySums sorts FileInfoSums content by sums. -func (fis FileInfoSums) SortBySums() { - dups := fis.GetDuplicatePaths() - if len(dups) > 0 { - sort.Sort(bySum{fis, dups}) - } else { - sort.Sort(bySum{fis, nil}) - } -} - -// byName is a sort.Sort helper for sorting by file names. 
-// If names are the same, order them by their appearance in the tar archive.
-type byName struct{ FileInfoSums }
-
-func (bn byName) Less(i, j int) bool {
-	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
-		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
-	}
-	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
-}
-
-// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive.
-type bySum struct {
-	FileInfoSums
-	dups FileInfoSums
-}
-
-func (bs bySum) Less(i, j int) bool {
-	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
-		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
-	}
-	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
-}
-
-// byPos is a sort.Sort helper for sorting the fileinfos by their original order in the tar archive.
-type byPos struct{ FileInfoSums }
-
-func (bp byPos) Less(i, j int) bool {
-	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/tarsum.go b/vendor/github.com/hyperhq/hypercli/pkg/tarsum/tarsum.go
deleted file mode 100644
index 4dc89bd41..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/tarsum.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Package tarsum provides algorithms to perform checksum calculation on
-// filesystem layers.
-//
-// The transportation of filesystems, regarding Docker, is done with tar(1)
-// archives. There are a variety of tar serialization formats [2], and a key
-// concern here is ensuring a repeatable checksum given a set of inputs from a
-// generic tar archive. Types of transportation include distribution to and from a
-// registry endpoint, saving and loading through commands or Docker daemon APIs,
-// transferring the build context from client to Docker daemon, and committing the
-// filesystem of a container to become an image.
-//
-// As tar archives are used for transit, but not preserved in many situations, the
-// focus of the algorithm is to ensure the integrity of the preserved filesystem,
-// while maintaining a deterministic accountability. It neither constrains the
-// ordering or manipulation of the files during the creation or unpacking of the
-// archive, nor includes additional metadata about the file system attributes.
-package tarsum
-
-import (
-	"archive/tar"
-	"bytes"
-	"compress/gzip"
-	"crypto"
-	"crypto/sha256"
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"hash"
-	"io"
-	"strings"
-)
-
-const (
-	buf8K  = 8 * 1024
-	buf16K = 16 * 1024
-	buf32K = 32 * 1024
-)
-
-// NewTarSum creates a new interface for calculating a fixed time checksum of a
-// tar archive.
-//
-// This is used for calculating checksums of layers of an image, in some cases
-// including the byte payload of the image's json metadata as well, and for
-// calculating the checksums for the build cache.
-func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
-	return NewTarSumHash(r, dc, v, DefaultTHash)
-}
-
-// NewTarSumHash creates a new TarSum, providing a THash to use rather than
-// the DefaultTHash.
-func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
-	headerSelector, err := getTarHeaderSelector(v)
-	if err != nil {
-		return nil, err
-	}
-	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
-	err = ts.initTarSum()
-	return ts, err
-}
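For the record, a minimal sketch of how a caller might have driven the tarsum package being removed here, assuming the `TarSum` interface defined just below; the `main` wrapper and the `layer.tar` path are illustrative, not from the original code:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	"github.com/hyperhq/hypercli/pkg/tarsum"
)

func main() {
	// "layer.tar" is an illustrative path to any tar archive.
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Version1 also hashes extended attributes; true disables gzip output.
	ts, err := tarsum.NewTarSum(f, true, tarsum.Version1)
	if err != nil {
		log.Fatal(err)
	}
	// The checksum is only complete once the whole stream has been consumed.
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts.Sum(nil)) // e.g. "tarsum.v1+sha256:..."
}
```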
-// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
-func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
-	parts := strings.SplitN(label, "+", 2)
-	if len(parts) != 2 {
-		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
-	}
-
-	versionName, hashName := parts[0], parts[1]
-
-	version, ok := tarSumVersionsByName[versionName]
-	if !ok {
-		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
-	}
-
-	hashConfig, ok := standardHashConfigs[hashName]
-	if !ok {
-		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
-	}
-
-	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
-
-	return NewTarSumHash(r, disableCompression, version, tHash)
-}
-
-// TarSum is the generic interface for calculating fixed time
-// checksums of a tar archive.
-type TarSum interface {
-	io.Reader
-	GetSums() FileInfoSums
-	Sum([]byte) string
-	Version() Version
-	Hash() THash
-}
-
-// tarSum struct is the structure for a Version0 checksum calculation.
-type tarSum struct {
-	io.Reader
-	tarR               *tar.Reader
-	tarW               *tar.Writer
-	writer             writeCloseFlusher
-	bufTar             *bytes.Buffer
-	bufWriter          *bytes.Buffer
-	bufData            []byte
-	h                  hash.Hash
-	tHash              THash
-	sums               FileInfoSums
-	fileCounter        int64
-	currentFile        string
-	finished           bool
-	first              bool
-	DisableCompression bool              // false by default. When false, the output is gzip compressed.
-	tarSumVersion      Version           // this field is not exported so it cannot be mutated during use
-	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
-}
-
-func (ts tarSum) Hash() THash {
-	return ts.tHash
-}
-
-func (ts tarSum) Version() Version {
-	return ts.tarSumVersion
-}
-
-// THash provides a hash.Hash type generator and its name.
-type THash interface {
-	Hash() hash.Hash
-	Name() string
-}
-
-// NewTHash is a convenience method for creating a THash.
-func NewTHash(name string, h func() hash.Hash) THash {
-	return simpleTHash{n: name, h: h}
-}
-
-type tHashConfig struct {
-	name string
-	hash crypto.Hash
-}
-
-var (
-	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
-	standardHashConfigs = map[string]tHashConfig{
-		"sha256": {name: "sha256", hash: crypto.SHA256},
-		"sha512": {name: "sha512", hash: crypto.SHA512},
-	}
-)
-
-// DefaultTHash is the default TarSum hashing algorithm - "sha256".
-var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = strings.TrimSuffix(strings.TrimPrefix(currentHeader.Name, "./"), "/") - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/versioning.go b/vendor/github.com/hyperhq/hypercli/pkg/tarsum/versioning.go deleted file mode 100644 index 288228685..000000000 --- 
a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/versioning.go +++ /dev/null @@ -1,150 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. -type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. 
-	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))
-
-	// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
-	v0headers := v0TarHeaderSelect(h)
-	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
-	orderedHeaders = append(orderedHeaders, v0headers[6:]...)
-
-	// Finally, append the sorted xattrs.
-	for _, k := range xAttrKeys {
-		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
-	}
-
-	return
-}
-
-var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{
-	Version0:   v0TarHeaderSelect,
-	Version1:   v1TarHeaderSelect,
-	VersionDev: v1TarHeaderSelect,
-}
-
-func getTarHeaderSelector(v Version) (tarHeaderSelector, error) {
-	headerSelector, ok := registeredHeaderSelectors[v]
-	if !ok {
-		return nil, ErrVersionNotImplemented
-	}
-
-	return headerSelector, nil
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/writercloser.go b/vendor/github.com/hyperhq/hypercli/pkg/tarsum/writercloser.go
deleted file mode 100644
index 9727ecde3..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/tarsum/writercloser.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package tarsum
-
-import (
-	"io"
-)
-
-type writeCloseFlusher interface {
-	io.WriteCloser
-	Flush() error
-}
-
-type nopCloseFlusher struct {
-	io.Writer
-}
-
-func (n *nopCloseFlusher) Close() error {
-	return nil
-}
-
-func (n *nopCloseFlusher) Flush() error {
-	return nil
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/ascii.go b/vendor/github.com/hyperhq/hypercli/pkg/term/ascii.go
deleted file mode 100644
index f5262bccf..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/term/ascii.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package term
-
-import (
-	"fmt"
-	"strings"
-)
-
-// ASCII lists the possible supported ASCII key sequences
-var ASCII = []string{
-	"ctrl-@",
-	"ctrl-a",
-	"ctrl-b",
-	"ctrl-c",
-	"ctrl-d",
-	"ctrl-e",
-	"ctrl-f",
-	"ctrl-g",
-	"ctrl-h",
-	"ctrl-i",
-	"ctrl-j",
-	"ctrl-k",
-	"ctrl-l",
-	"ctrl-m",
-	"ctrl-n",
-	"ctrl-o",
-	"ctrl-p",
-	"ctrl-q",
-	"ctrl-r",
-	"ctrl-s",
-	"ctrl-t",
-	"ctrl-u",
-	"ctrl-v",
-	"ctrl-w",
-	"ctrl-x",
-	"ctrl-y",
-	"ctrl-z",
-	"ctrl-[",
-	"ctrl-\\",
-	"ctrl-]",
-	"ctrl-^",
-	"ctrl-_",
-}
-
-// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes.
-func ToBytes(keys string) ([]byte, error) {
-	codes := []byte{}
-next:
-	for _, key := range strings.Split(keys, ",") {
-		if len(key) != 1 {
-			for code, ctrl := range ASCII {
-				if ctrl == key {
-					codes = append(codes, byte(code))
-					continue next
-				}
-			}
-			if key == "DEL" {
-				codes = append(codes, 127)
-			} else {
-				return nil, fmt.Errorf("Unknown character: '%s'", key)
-			}
-		} else {
-			codes = append(codes, byte(key[0]))
-		}
-	}
-	return codes, nil
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/tc_linux_cgo.go b/vendor/github.com/hyperhq/hypercli/pkg/term/tc_linux_cgo.go
deleted file mode 100644
index 1005084fd..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/term/tc_linux_cgo.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build linux,cgo
-
-package term
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// #include <termios.h>
-import "C"
-
-// Termios is the Unix API for terminal I/O.
-// It is a passthrough for syscall.Termios in order to make it portable with
-// other platforms where it is not available or handled differently.
-type Termios syscall.Termios
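A small usage sketch for the `ToBytes` helper above, ahead of the cgo `MakeRaw` implementation that follows; the key-sequence value is illustrative, the way a CLI might handle a detach-keys style option:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hyperhq/hypercli/pkg/term"
)

func main() {
	// Parse a comma-separated key sequence into raw bytes.
	keys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("% x\n", keys) // 10 11
}
```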
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd uintptr) (*State, error) {
-	var oldState State
-	if err := tcget(fd, &oldState.termios); err != 0 {
-		return nil, err
-	}
-
-	newState := oldState.termios
-
-	C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState)))
-	newState.Oflag = newState.Oflag | C.OPOST
-	if err := tcset(fd, &newState); err != 0 {
-		return nil, err
-	}
-	return &oldState, nil
-}
-
-func tcget(fd uintptr, p *Termios) syscall.Errno {
-	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
-	if ret != 0 {
-		return err.(syscall.Errno)
-	}
-	return 0
-}
-
-func tcset(fd uintptr, p *Termios) syscall.Errno {
-	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
-	if ret != 0 {
-		return err.(syscall.Errno)
-	}
-	return 0
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/tc_other.go b/vendor/github.com/hyperhq/hypercli/pkg/term/tc_other.go
deleted file mode 100644
index 266039bac..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/term/tc_other.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-// +build !linux !cgo
-
-package term
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-func tcget(fd uintptr, p *Termios) syscall.Errno {
-	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p)))
-	return err
-}
-
-func tcset(fd uintptr, p *Termios) syscall.Errno {
-	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p)))
-	return err
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/term.go b/vendor/github.com/hyperhq/hypercli/pkg/term/term.go
deleted file mode 100644
index 316c39905..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/term/term.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// +build !windows
-
-// Package term provides structures and helper functions to work with
-// terminal (state, sizes).
-package term
-
-import (
-	"errors"
-	"io"
-	"os"
-	"os/signal"
-	"syscall"
-	"unsafe"
-)
-
-var (
-	// ErrInvalidState is returned if the state of the terminal is invalid.
-	ErrInvalidState = errors.New("Invalid terminal state")
-)
-
-// State represents the state of the terminal.
-type State struct {
-	termios Termios
-}
-
-// Winsize represents the size of the terminal window.
-type Winsize struct {
-	Height uint16
-	Width  uint16
-	x      uint16
-	y      uint16
-}
-
-// StdStreams returns the standard streams (stdin, stdout, stderr).
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
-	return os.Stdin, os.Stdout, os.Stderr
-}
-
-// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal.
-func GetFdInfo(in interface{}) (uintptr, bool) {
-	var inFd uintptr
-	var isTerminalIn bool
-	if file, ok := in.(*os.File); ok {
-		inFd = file.Fd()
-		isTerminalIn = IsTerminal(inFd)
-	}
-	return inFd, isTerminalIn
-}
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
-	ws := &Winsize{}
-	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws)))
-	// Skip errno = 0
-	if err == 0 {
-		return ws, nil
-	}
-	return ws, err
-}
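A minimal sketch of querying the terminal size through the Unix term package above, using `GetFdInfo` and `GetWinsize`; the `main` wrapper is illustrative and assumes stdout is attached to a TTY:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hyperhq/hypercli/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdout)
	if !isTerm {
		log.Fatal("stdout is not a terminal")
	}
	ws, err := term.GetWinsize(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}
```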
-// SetWinsize tries to set the specified window size for the specified file descriptor.
-func SetWinsize(fd uintptr, ws *Winsize) error {
-	_, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws)))
-	// Skip errno = 0
-	if err == 0 {
-		return nil
-	}
-	return err
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd uintptr) bool {
-	var termios Termios
-	return tcget(fd, &termios) == 0
-}
-
-// RestoreTerminal restores the terminal connected to the given file descriptor
-// to a previous state.
-func RestoreTerminal(fd uintptr, state *State) error {
-	if state == nil {
-		return ErrInvalidState
-	}
-	if err := tcset(fd, &state.termios); err != 0 {
-		return err
-	}
-	return nil
-}
-
-// SaveState saves the state of the terminal connected to the given file descriptor.
-func SaveState(fd uintptr) (*State, error) {
-	var oldState State
-	if err := tcget(fd, &oldState.termios); err != 0 {
-		return nil, err
-	}
-
-	return &oldState, nil
-}
-
-// DisableEcho applies the specified state to the terminal connected to the file
-// descriptor, with echo disabled.
-func DisableEcho(fd uintptr, state *State) error {
-	newState := state.termios
-	newState.Lflag &^= syscall.ECHO
-
-	if err := tcset(fd, &newState); err != 0 {
-		return err
-	}
-	handleInterrupt(fd, state)
-	return nil
-}
-
-// SetRawTerminal puts the terminal connected to the given file descriptor into
-// raw mode and returns the previous state.
-func SetRawTerminal(fd uintptr) (*State, error) {
-	oldState, err := MakeRaw(fd)
-	if err != nil {
-		return nil, err
-	}
-	handleInterrupt(fd, oldState)
-	return oldState, err
-}
-
-func handleInterrupt(fd uintptr, state *State) {
-	sigchan := make(chan os.Signal, 1)
-	signal.Notify(sigchan, os.Interrupt)
-
-	go func() {
-		_ = <-sigchan
-		RestoreTerminal(fd, state)
-		os.Exit(0)
-	}()
-}
diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/term_windows.go b/vendor/github.com/hyperhq/hypercli/pkg/term/term_windows.go
deleted file mode 100644
index 58e337e09..000000000
--- a/vendor/github.com/hyperhq/hypercli/pkg/term/term_windows.go
+++ /dev/null
@@ -1,305 +0,0 @@
-// +build windows
-
-package term
-
-import (
-	"io"
-	"os"
-	"os/signal"
-	"syscall"
-
-	"github.com/Azure/go-ansiterm/winterm"
-	"github.com/hyperhq/hypercli/pkg/system"
-	"github.com/hyperhq/hypercli/pkg/term/windows"
-)
-
-// State holds the console mode for the terminal.
-type State struct {
-	inMode, outMode     uint32
-	inHandle, outHandle syscall.Handle
-}
-
-// Winsize is used for window size.
-type Winsize struct {
-	Height uint16
-	Width  uint16
-	x      uint16
-	y      uint16
-}
-
-const (
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
-	enableVirtualTerminalInput      = 0x0200
-	enableVirtualTerminalProcessing = 0x0004
-)
-
-// usingNativeConsole is true if we are using the Windows native console
-var usingNativeConsole bool
-
-// StdStreams returns the standard streams (stdin, stdout, stderr).
-func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
-	switch {
-	case os.Getenv("ConEmuANSI") == "ON":
-		// The ConEmu terminal emulates ANSI on output streams well.
-		return windows.ConEmuStreams()
-	case os.Getenv("MSYSTEM") != "":
-		// MSYS (mingw) does not emulate ANSI well.
- return windows.ConsoleStreams() - default: - if useNativeConsole() { - usingNativeConsole = true - return os.Stdin, os.Stdout, os.Stderr - } - return windows.ConsoleStreams() - } -} - -// useNativeConsole determines if the docker client should use the built-in -// console which supports ANSI emulation, or fall-back to the golang emulator -// (github.com/azure/go-ansiterm). -func useNativeConsole() bool { - osv, err := system.GetOSVersion() - if err != nil { - return false - } - - // Native console is not available before major version 10 - if osv.MajorVersion < 10 { - return false - } - - // Must have a late pre-release TP4 build of Windows Server 2016/Windows 10 TH2 or later - if osv.Build < 10578 { - return false - } - - // Get the console modes. If this fails, we can't use the native console - state, err := getNativeConsole() - if err != nil { - return false - } - - // Probe the console to see if it can be enabled. - if nil != probeNativeConsole(state) { - return false - } - - // Environment variable override - if e := os.Getenv("USE_NATIVE_CONSOLE"); e != "" { - if e == "1" { - return true - } - return false - } - - // TODO Windows. The native emulator still has issues which - // mean it shouldn't be enabled for everyone. Change this next line to true - // to change the default to "enable if available". In the meantime, users - // can still try it out by using USE_NATIVE_CONSOLE env variable. - return false -} - -// getNativeConsole returns the console modes ('state') for the native Windows console -func getNativeConsole() (State, error) { - var ( - err error - state State - ) - - // Get the handle to stdout - if state.outHandle, err = syscall.GetStdHandle(syscall.STD_OUTPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdout handle - if err = syscall.GetConsoleMode(state.outHandle, &state.outMode); err != nil { - return state, err - } - - // Get the handle to stdin - if state.inHandle, err = syscall.GetStdHandle(syscall.STD_INPUT_HANDLE); err != nil { - return state, err - } - - // Get the console mode from the consoles stdin handle - if err = syscall.GetConsoleMode(state.inHandle, &state.inMode); err != nil { - return state, err - } - - return state, nil -} - -// probeNativeConsole probes the console to determine if native can be supported, -func probeNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - return err - } - defer winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - - return nil -} - -// enableNativeConsole turns on native console mode -func enableNativeConsole(state State) error { - if err := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode|enableVirtualTerminalProcessing); err != nil { - return err - } - - if err := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode|enableVirtualTerminalInput); err != nil { - winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) // restore out if we can - return err - } - - return nil -} - -// disableNativeConsole turns off native console mode -func disableNativeConsole(state *State) error { - // Try and restore both in an out before error checking. 
- errout := winterm.SetConsoleMode(uintptr(state.outHandle), state.outMode) - errin := winterm.SetConsoleMode(uintptr(state.inHandle), state.inMode) - if errout != nil { - return errout - } - if errin != nil { - return errin - } - return nil -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - x: 0, - y: 0} - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if usingNativeConsole { - return disableNativeConsole(state) - } - return winterm.SetConsoleMode(fd, state.outMode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - if usingNativeConsole { - state, err := getNativeConsole() - if err != nil { - return nil, err - } - return &state, nil - } - - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{outMode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.inMode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. 
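Ahead of the Windows `MakeRaw` implementation that follows, a hedged sketch of the usual raw-mode round trip through the Unix `term` API shown earlier (`SetRawTerminal`, `RestoreTerminal`); the `main` wrapper is illustrative:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hyperhq/hypercli/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	if !term.IsTerminal(fd) {
		log.Fatal("stdin is not a terminal")
	}
	// Enter raw mode; the returned State is what RestoreTerminal needs to
	// undo it, and SetRawTerminal also restores it on SIGINT.
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		log.Fatal(err)
	}
	defer term.RestoreTerminal(fd, state)

	// Relay raw input until the stream is closed.
	io.Copy(os.Stdout, os.Stdin)
}
```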
-func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.inMode - if usingNativeConsole { - if err := enableNativeConsole(*state); err != nil { - return nil, err - } - mode |= enableVirtualTerminalInput - } - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_darwin.go b/vendor/github.com/hyperhq/hypercli/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900a..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_freebsd.go b/vendor/github.com/hyperhq/hypercli/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_linux.go b/vendor/github.com/hyperhq/hypercli/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6ae..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
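Alongside the `MakeRaw` variants (the non-cgo Linux one continues just below), a short sketch of the echo-suppression path through `SaveState` and `DisableEcho` from the Unix term package above; the password prompt is an illustrative use, not taken from the original sources:

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"

	"github.com/hyperhq/hypercli/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	state, err := term.SaveState(fd)
	if err != nil {
		log.Fatal(err)
	}
	// Re-apply the saved state with the echo bit cleared for the prompt.
	if err := term.DisableEcho(fd, state); err != nil {
		log.Fatal(err)
	}
	defer term.RestoreTerminal(fd, state)

	fmt.Print("Password: ")
	secret, _ := bufio.NewReader(os.Stdin).ReadString('\n')
	fmt.Println()
	_ = secret // hand the secret to the caller
}
```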
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_reader.go b/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 5b91b7834..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,257 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -func newAnsiReader(nFile int) *ansiReader { - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
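The comment above describes the size-of-type trick used on the very next line; a standalone illustration with a stand-in struct instead of `winterm.INPUT_RECORD` (the field layout here is made up for the example):

```go
package main

import (
	"fmt"
	"unsafe"
)

// record stands in for winterm.INPUT_RECORD; any struct works the same way.
type record struct {
	eventType uint16
	payload   [18]byte
}

func main() {
	maxBytes := 4096
	// unsafe.Sizeof never evaluates its operand, so casting an unrelated
	// pointer merely gives it an expression of the right type; nothing is
	// actually dereferenced.
	recordSize := int(unsafe.Sizeof(*(*record)(unsafe.Pointer(&maxBytes))))
	fmt.Println(maxBytes / recordSize) // records that fit in the buffer
}
```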
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement following control sequences - // -D Signals the end of input from the keyboard; also exits current shell. - // -H Deletes the first character to the left of the cursor. Also called the ERASE key. - // -Q Restarts printing after it has been stopped with -s. - // -S Suspends printing on the screen (does not stop the program). - // -U Deletes all characters on the current line. Also called the KILL key. - // -E Quits current command and creates a core - - } - - // +Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_writer.go b/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_writer.go deleted file mode 100644 index 9f3232c09..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build windows - -package windows - -import ( - "io/ioutil" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -func newAnsiWriter(nFile int) *ansiWriter { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/console.go b/vendor/github.com/hyperhq/hypercli/pkg/term/windows/console.go deleted file mode 100644 index 3036a0460..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/console.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" - "io/ioutil" -) - -// ConEmuStreams returns prepared versions of console streams, -// for proper use in ConEmu terminal. -// The ConEmu terminal emulates ANSI on output streams well by default. -func ConEmuStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - stdOut = os.Stdout - stdErr = os.Stderr - - // WARNING (BEGIN): sourced from newAnsiWriter - - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - // WARNING (END): sourced from newAnsiWriter - - return stdIn, stdOut, stdErr -} - -// ConsoleStreams returns a wrapped version for each standard stream referencing a console, -// that handles ANSI character sequences. -func ConsoleStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - if IsConsole(os.Stdin.Fd()) { - stdIn = newAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if IsConsole(os.Stdout.Fd()) { - stdOut = newAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if IsConsole(os.Stderr.Fd()) { - stdErr = newAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return stdIn, stdOut, stdErr -} - -// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/windows.go b/vendor/github.com/hyperhq/hypercli/pkg/term/windows/windows.go deleted file mode 100644 index bf4c7b502..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/term/windows/windows.go +++ /dev/null @@ -1,5 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. 
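A minimal sketch of consuming the platform-selected streams through `term.StdStreams`, whose per-platform behavior is described in the comments above and continued in the package clause below; the printed strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hyperhq/hypercli/pkg/term"
)

func main() {
	// On Windows these are ANSI-translating wrappers around the console
	// handles; on Unix they are simply os.Stdin, os.Stdout and os.Stderr.
	stdin, stdout, stderr := term.StdStreams()
	_ = stdin

	fmt.Fprintln(stdout, "regular output")
	fmt.Fprintln(stderr, "error output")
}
```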
- -package windows diff --git a/vendor/github.com/hyperhq/hypercli/pkg/urlutil/urlutil.go b/vendor/github.com/hyperhq/hypercli/pkg/urlutil/urlutil.go deleted file mode 100644 index f7094b1fe..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/urlutil/urlutil.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package urlutil provides helper function to check urls kind. -// It supports http urls, git urls and transport url (tcp://, …) -package urlutil - -import ( - "regexp" - "strings" -) - -var ( - validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, - "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "udp://", "unix://"}, - } - urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") -) - -// IsURL returns true if the provided str is an HTTP(S) URL. -func IsURL(str string) bool { - return checkURL(str, "url") -} - -// IsGitURL returns true if the provided str is a git repository URL. -func IsGitURL(str string) bool { - if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { - return true - } - return checkURL(str, "git") -} - -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - -// IsTransportURL returns true if the provided str is a transport (tcp, udp, unix) URL. -func IsTransportURL(str string) bool { - return checkURL(str, "transport") -} - -func checkURL(str, kind string) bool { - for _, prefix := range validPrefixes[kind] { - if strings.HasPrefix(str, prefix) { - return true - } - } - return false -} diff --git a/vendor/github.com/hyperhq/hypercli/pkg/version/version.go b/vendor/github.com/hyperhq/hypercli/pkg/version/version.go deleted file mode 100644 index c001279f9..000000000 --- a/vendor/github.com/hyperhq/hypercli/pkg/version/version.go +++ /dev/null @@ -1,68 +0,0 @@ -package version - -import ( - "strconv" - "strings" -) - -// Version provides utility methods for comparing versions. 
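Before the `Version` comparison helpers below, a quick sketch exercising the urlutil classifiers just deleted; the sample strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hyperhq/hypercli/pkg/urlutil"
)

func main() {
	for _, s := range []string{
		"https://example.com/ctx.tar.gz",
		"git@github.com:docker/docker.git",
		"tcp://127.0.0.1:2375",
	} {
		fmt.Printf("%-36s url=%-5v git=%-5v transport=%v\n",
			s, urlutil.IsURL(s), urlutil.IsGitURL(s), urlutil.IsTransportURL(s))
	}
}
```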
-type Version string - -func (v Version) compareTo(other Version) int { - var ( - currTab = strings.Split(string(v), ".") - otherTab = strings.Split(string(other), ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// String returns the version string -func (v Version) String() string { - return string(v) -} - -// LessThan checks if a version is less than another -func (v Version) LessThan(other Version) bool { - return v.compareTo(other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func (v Version) LessThanOrEqualTo(other Version) bool { - return v.compareTo(other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func (v Version) GreaterThan(other Version) bool { - return v.compareTo(other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func (v Version) GreaterThanOrEqualTo(other Version) bool { - return v.compareTo(other) >= 0 -} - -// Equal checks if a version is equal to another -func (v Version) Equal(other Version) bool { - return v.compareTo(other) == 0 -} diff --git a/vendor/github.com/hyperhq/hypercli/project/CONTRIBUTORS.md b/vendor/github.com/hyperhq/hypercli/project/CONTRIBUTORS.md deleted file mode 100644 index e499e1a9d..000000000 --- a/vendor/github.com/hyperhq/hypercli/project/CONTRIBUTORS.md +++ /dev/null @@ -1,434 +0,0 @@ -# Contributing to Docker - -Want to hack on Docker? Awesome! We have a contributor's guide that explains -[setting up a Docker development environment and the contribution -process](https://docs.docker.com/opensource/project/who-written-for/). - -![Contributors guide](docs/static_files/contributors.png) - -This page contains information about reporting issues as well as some tips and -guidelines useful to experienced open source contributors. Finally, make sure -you read our [community guidelines](#docker-community-guidelines) before you -start participating. - -## Topics - -* [Reporting Security Issues](#reporting-security-issues) -* [Design and Cleanup Proposals](#design-and-cleanup-proposals) -* [Reporting Issues](#reporting-other-issues) -* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) -* [Community Guidelines](#docker-community-guidelines) - -## Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -Security reports are greatly appreciated and we will publicly thank you for it. -We also like to send gifts—if you're into Docker schwag, make sure to let -us know. We currently do not offer a paid security bounty program, but are not -ruling it out in the future. - - -## Reporting other issues - -A great way to contribute to the project is to send a detailed report when you -encounter an issue. We always appreciate a well-written, thorough bug report, -and will thank you for it! - -Check that [our issue database](https://github.com/docker/docker/issues) -doesn't already include that problem or suggestion before submitting an issue. 
-If you find a match, you can use the "subscribe" button to get notified on
-updates. Do *not* leave random "+1" or "I have this too" comments, as they
-only clutter the discussion, and don't help resolve it. However, if you
-have ways to reproduce the issue or have additional information that may help
-resolve the issue, please leave a comment.
-
-When reporting issues, always include:
-
-* The output of `docker version`.
-* The output of `docker info`.
-
-Also include the steps required to reproduce the problem if possible and
-applicable. This information will help us review and fix your issue faster.
-When sending lengthy log files, consider posting them as a gist (https://gist.github.com).
-Don't forget to remove sensitive data from your log files before posting (you can
-replace those parts with "REDACTED").
-
-**Issue Report Template**:
-
-```
-Description of problem:
-
-
-`docker version`:
-
-
-`docker info`:
-
-
-`uname -a`:
-
-
-Environment details (AWS, VirtualBox, physical, etc.):
-
-
-How reproducible:
-
-
-Steps to Reproduce:
-1.
-2.
-3.
-
-
-Actual Results:
-
-
-Expected Results:
-
-
-Additional info:
-
-
-```
-
-
-## Quick contribution tips and guidelines
-
-This section gives the experienced contributor some tips and guidelines.
-
-### Pull requests are always welcome
-
-Not sure if that typo is worth a pull request? Found a bug and know how to fix
-it? Do it! We will appreciate it. Any significant improvement should be
-documented as [a GitHub issue](https://github.com/docker/docker/issues) before
-anybody starts working on it.
-
-We are always thrilled to receive pull requests. We do our best to process them
-quickly. If your pull request is not accepted on the first try,
-don't get discouraged! Our contributor's guide explains [the review process we
-use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
-
-### Design and cleanup proposals
-
-You can propose new designs for existing Docker features. You can also design
-entirely new features. We really appreciate contributors who want to refactor or
-otherwise clean up our project. For information on making these types of
-contributions, see [the advanced contribution
-section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
-the contributors guide.
-
-We try hard to keep Docker lean and focused. Docker can't do everything for
-everybody. This means that we might decide against incorporating a new feature.
-However, there might be a way to implement that feature *on top of* Docker.
-
-### Talking to other Docker users and contributors
-
-
-| Resource | Description |
-| --- | --- |
-| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the `#docker` and `#docker-dev` group on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. |
-| Google Groups | There are two groups. Docker-user is for people using Docker containers. The docker-dev group is for contributors and other people contributing to the Docker project. |
-| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
-| Stack Overflow | Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. |
-
-### Conventions
-
-Fork the repository and make changes on your fork in a feature branch:
-
-- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
-  the issue.
-- If it's a feature branch, create an enhancement issue to announce
-  your intentions, and name it XXXX-something where XXXX is the number of the
-  issue.
-
-Submit unit tests for your changes. Go has a great test framework built in; use
-it! Take a look at existing tests for inspiration. [Run the full test
-suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
-submitting a pull request.
-
-Update the documentation when creating or modifying features. Test your
-documentation changes for clarity, concision, and correctness, as well as a
-clean documentation build. See our contributors guide for [our style
-guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
-the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
-
-Write clean code. Universally formatted code promotes ease of writing, reading,
-and maintenance. Always run `gofmt -s -w file.go` on each changed file before
-committing your changes. Most editors have plug-ins that do this automatically.
-
-Pull request descriptions should be as clear as possible and include a reference
-to all the issues that they address.
-
-Commit messages must start with a capitalized and short summary (max. 50 chars)
-written in the imperative, followed by an optional, more detailed explanatory
-text which is separated from the summary by an empty line.
-
-Code review comments may be added to your pull request. Discuss, then make the
-suggested modifications and push additional commits to your feature branch. Post
-a comment after pushing. New commits show up in the pull request automatically,
-but the reviewers are notified only when you comment.
-
-Pull requests must be cleanly rebased on top of master without multiple branches
-mixed into the PR.
-
-**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
-feature branch to update your pull request rather than `git merge master`.
-
-Before you make a pull request, squash your commits into logical units of work
-using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
-set of patches that should be reviewed together: for example, upgrading the
-version of a vendored dependency and taking advantage of its newly available
-features constitute two separate units of work. Implementing a new function and
-calling it in another file constitute a single logical unit of work. The vast
-majority of submissions should have a single commit, so if in doubt: squash
-down to one.
-
-After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/). Include documentation
-changes in the same pull request so that a revert would remove all traces of
-the feature or fix.
-
-Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
-close an issue. Including references automatically closes the issue on a merge.
-
-Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
-from the Git history.
-
-Please see the [Coding Style](#coding-style) for further guidelines.
-
-### Merge approval
-
-Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
-indicate acceptance.
-
-A change requires LGTMs from an absolute majority of the maintainers of each
-component affected. For example, if a change affects `docs/` and `registry/`, it
-needs an absolute majority from the maintainers of `docs/` AND, separately, an
-absolute majority of the maintainers of `registry/`.
-
-For more details, see the [MAINTAINERS](MAINTAINERS) page.
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
-    Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
-
-Note that the old-style `Docker-DCO-1.1-Signed-off-by: ...` format is still
-accepted, so there is no need to update outstanding pull requests to the new
-format right away, but please do adjust your processes for future contributions.
-
-### How can I become a maintainer?
-
-The procedures for adding new maintainers are explained in the
-global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
-file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
-repository.
-
-Don't forget: being a maintainer is a time investment. Make sure you
-will have time to make yourself available. You don't have to be a
-maintainer to make a difference on the project!
-
-## Docker community guidelines
-
-We want to keep the Docker community awesome, growing and collaborative. We need
-your help to keep it that way. To help with this we've come up with some general
-guidelines for the community as a whole:
-
-* Be nice: Be courteous, respectful and polite to fellow community members:
-  no regional, racial, gender, or other abuse will be tolerated.
We like - nice people way better than mean ones! - -* Encourage diversity and participation: Make everyone in our community feel - welcome, regardless of their background and the extent of their - contributions, and do everything possible to encourage participation in - our community. - -* Keep it legal: Basically, don't get us in trouble. Share only content that - you own, do not share private or sensitive information, and don't break - the law. - -* Stay on topic: Make sure that you are posting to the correct channel and - avoid off-topic discussions. Remember when you update an issue or respond - to an email you are potentially sending to a large number of people. Please - consider this before you update. Also remember that nobody likes spam. - -* Don't send email to the maintainers: There's no need to send email to the - maintainers to ask them to investigate an issue or to take a look at a - pull request. Instead of sending an email, GitHub mentions should be - used to ping maintainers to review a pull request, a proposal or an - issue. - -### Guideline violations — 3 strikes method - -The point of this section is not to find opportunities to punish people, but we -do need a fair way to deal with people who are making our community suck. - -1. First occurrence: We'll give you a friendly, but public reminder that the - behavior is inappropriate according to our guidelines. - -2. Second occurrence: We will send you a private message with a warning that - any additional violations will result in removal from the community. - -3. Third occurrence: Depending on the violation, we may need to delete or ban - your account. - -**Notes:** - -* Obvious spammers are banned on first occurrence. If we don't do this, we'll - have spam all over the place. - -* Violations are forgiven after 6 months of good behavior, and we won't hold a - grudge. - -* People who commit minor infractions will get some education, rather than - hammering them in the 3 strikes process. - -* The rules apply equally to everyone in the community, no matter how much - you've contributed. - -* Extreme violations of a threatening, abusive, destructive or illegal nature - will be addressed immediately and are not subject to 3 strikes or forgiveness. - -* Contact abuse@docker.com to report abuse or appeal violations. In the case of - appeals, we know that mistakes happen, and we'll work with you to come up with a - fair solution if there has been a misunderstanding. - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. 
Comment the code. Tell us the why, the history and the context.
-5. Document _all_ declarations and methods, even private ones. Declare
-   expectations, caveats and anything else that may be important. If a type
-   gets exported, having the comments already there will ensure it's ready.
-6. Variable name length should be proportional to its context and no longer.
-   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
-   In practice, short methods will have short variable names and globals will
-   have longer names.
-7. No underscores in package names. If you need a compound name, step back,
-   and re-examine why you need a compound name. If you still think you need a
-   compound name, lose the underscore.
-8. No utils or helpers packages. If a function is not general enough to
-   warrant its own package, it has not been written generally enough to be a
-   part of a util package. Just leave it unexported and well-documented.
-9. All tests should run with `go test` and outside tooling should not be
-   required. No, we don't need another unit testing framework. Assertion
-   packages are acceptable if they provide _real_ incremental value.
-10. Even though we call these "rules" above, they are actually just
-    guidelines. Since you've read all the rules, you now know that.
-
-If you are having trouble getting into the mood of idiomatic Go, we recommend
-reading through [Effective Go](http://golang.org/doc/effective_go.html). The
-[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
-kool-aid is a lot easier than going thirsty.
diff --git a/vendor/github.com/hyperhq/hypercli/reference/reference.go b/vendor/github.com/hyperhq/hypercli/reference/reference.go
deleted file mode 100644
index d7d89fe94..000000000
--- a/vendor/github.com/hyperhq/hypercli/reference/reference.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package reference
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/docker/distribution/digest"
-	distreference "github.com/docker/distribution/reference"
-	"github.com/hyperhq/hypercli/image/v1"
-)
-
-const (
-	// DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified
-	DefaultTag = "latest"
-	// DefaultHostname is the default built-in hostname
-	DefaultHostname = "docker.io"
-	// LegacyDefaultHostname is automatically converted to DefaultHostname
-	LegacyDefaultHostname = "index.docker.io"
-	// DefaultRepoPrefix is the prefix used for default repositories in default host
-	DefaultRepoPrefix = "library/"
-)
-
-// Named is an object with a full name
-type Named interface {
-	// Name returns normalized repository name, like "ubuntu".
-	Name() string
-	// String returns full reference, like "ubuntu@sha256:abcdef..."
-	String() string
-	// FullName returns full repository name with hostname, like "docker.io/library/ubuntu"
-	FullName() string
-	// Hostname returns hostname for the reference, like "docker.io"
-	Hostname() string
-	// RemoteName returns the repository component of the full name, like "library/ubuntu"
-	RemoteName() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
-	Named
-	Tag() string
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with hostname and digest
-type Canonical interface {
-	Named
-	Digest() digest.Digest
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name, otherwise an error is
-// returned.
-// If an error was encountered it is returned, along with a nil Reference. -func ParseNamed(s string) (Named, error) { - named, err := distreference.ParseNamed(s) - if err != nil { - return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) - } - r, err := WithName(named.Name()) - if err != nil { - return nil, err - } - if canonical, isCanonical := named.(distreference.Canonical); isCanonical { - return WithDigest(r, canonical.Digest()) - } - if tagged, isTagged := named.(distreference.NamedTagged); isTagged { - return WithTag(r, tagged.Tag()) - } - return r, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - name = normalize(name) - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - if err != nil { - return nil, err - } - return &namedRef{r}, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - r, err := distreference.WithTag(name, tag) - if err != nil { - return nil, err - } - return &taggedRef{namedRef{r}}, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - r, err := distreference.WithDigest(name, digest) - if err != nil { - return nil, err - } - return &canonicalRef{namedRef{r}}, nil -} - -type namedRef struct { - distreference.Named -} -type taggedRef struct { - namedRef -} -type canonicalRef struct { - namedRef -} - -func (r *namedRef) FullName() string { - hostname, remoteName := splitHostname(r.Name()) - return hostname + "/" + remoteName -} -func (r *namedRef) Hostname() string { - hostname, _ := splitHostname(r.Name()) - return hostname -} -func (r *namedRef) RemoteName() string { - _, remoteName := splitHostname(r.Name()) - return remoteName -} -func (r *taggedRef) Tag() string { - return r.namedRef.Named.(distreference.NamedTagged).Tag() -} -func (r *canonicalRef) Digest() digest.Digest { - return r.namedRef.Named.(distreference.Canonical).Digest() -} - -// WithDefaultTag adds a default tag to a reference if it only has a repo name. -func WithDefaultTag(ref Named) Named { - if IsNameOnly(ref) { - ref, _ = WithTag(ref, DefaultTag) - } - return ref -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, the default hostname is used. Repository name -// needs to be already validated before. 
-func splitHostname(name string) (hostname, remoteName string) {
-	i := strings.IndexRune(name, '/')
-	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
-		hostname, remoteName = DefaultHostname, name
-	} else {
-		hostname, remoteName = name[:i], name[i+1:]
-	}
-	if hostname == LegacyDefaultHostname {
-		hostname = DefaultHostname
-	}
-	if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') {
-		remoteName = DefaultRepoPrefix + remoteName
-	}
-	return
-}
-
-// normalize returns a repository name in its normalized form, meaning it
-// will not contain default hostname nor library/ prefix for official images.
-func normalize(name string) string {
-	host, remoteName := splitHostname(name)
-	if host == DefaultHostname {
-		if strings.HasPrefix(remoteName, DefaultRepoPrefix) {
-			return strings.TrimPrefix(remoteName, DefaultRepoPrefix)
-		}
-		return remoteName
-	}
-	return name
-}
-
-func validateName(name string) error {
-	if err := v1.ValidateID(name); err == nil {
-		return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
-	}
-	return nil
-}
diff --git a/vendor/github.com/hyperhq/hypercli/reference/store.go b/vendor/github.com/hyperhq/hypercli/reference/store.go
deleted file mode 100644
index ff0174dc5..000000000
--- a/vendor/github.com/hyperhq/hypercli/reference/store.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package reference
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sort"
-	"sync"
-
-	"github.com/docker/distribution/digest"
-	"github.com/hyperhq/hypercli/image"
-)
-
-var (
-	// ErrDoesNotExist is returned if a reference is not found in the
-	// store.
-	ErrDoesNotExist = errors.New("reference does not exist")
-)
-
-// An Association is a tuple associating a reference with an image ID.
-type Association struct {
-	Ref     Named
-	ImageID image.ID
-}
-
-// Store provides the set of methods which can operate on a tag store.
-type Store interface {
-	References(id image.ID) []Named
-	ReferencesByName(ref Named) []Association
-	AddTag(ref Named, id image.ID, force bool) error
-	AddDigest(ref Canonical, id image.ID, force bool) error
-	Delete(ref Named) (bool, error)
-	Get(ref Named) (image.ID, error)
-}
-
-type store struct {
-	mu sync.RWMutex
-	// jsonPath is the path to the file where the serialized tag data is
-	// stored.
-	jsonPath string
-	// Repositories is a map of repositories, indexed by name.
-	Repositories map[string]repository
-	// referencesByIDCache is a cache of references indexed by ID, to speed
-	// up References.
-	referencesByIDCache map[image.ID]map[string]Named
-}
-
-// repository maps tags to image IDs. The key is a stringified Reference,
-// including the repository name.
-type repository map[string]image.ID
-
-type lexicalRefs []Named
-
-func (a lexicalRefs) Len() int           { return len(a) }
-func (a lexicalRefs) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() }
-
-type lexicalAssociations []Association
-
-func (a lexicalAssociations) Len() int           { return len(a) }
-func (a lexicalAssociations) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() }
-
-// NewReferenceStore creates a new reference store, tied to a file path where
-// the set of references is serialized in JSON format.
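-//
-// A minimal usage sketch (illustrative only; the path, tag, and image ID
-// below are hypothetical):
-//
-//	refs, _ := NewReferenceStore("/tmp/repositories.json")
-//	ref, _ := ParseNamed("ubuntu:14.04")
-//	_ = refs.AddTag(ref, id, false) // id is some image.ID
-//	got, _ := refs.Get(ref)         // got == id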
-func NewReferenceStore(jsonPath string) (Store, error) { - abspath, err := filepath.Abs(jsonPath) - if err != nil { - return nil, err - } - - store := &store{ - jsonPath: abspath, - Repositories: make(map[string]repository), - referencesByIDCache: make(map[image.ID]map[string]Named), - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -// AddTag adds a tag reference to the store. If force is set to true, existing -// references can be overwritten. This only works for tags, not digests. -func (store *store) AddTag(ref Named, id image.ID, force bool) error { - if _, isCanonical := ref.(Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - return store.addReference(WithDefaultTag(ref), id, force) -} - -// AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error { - return store.addReference(ref, id, force) -} - -func (store *store) addReference(ref Named, id image.ID, force bool) error { - if ref.Name() == string(digest.Canonical) { - return errors.New("refusing to create an ambiguous tag using digest algorithm as name") - } - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - repository = make(map[string]image.ID) - store.Repositories[ref.Name()] = repository - } - - refStr := ref.String() - oldID, exists := repository[refStr] - - if exists { - // force only works for tags - if digested, isDigest := ref.(Canonical); isDigest { - return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) - } - - if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) - } - - if store.referencesByIDCache[oldID] != nil { - delete(store.referencesByIDCache[oldID], refStr) - if len(store.referencesByIDCache[oldID]) == 0 { - delete(store.referencesByIDCache, oldID) - } - } - } - - repository[refStr] = id - if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]Named) - } - store.referencesByIDCache[id][refStr] = ref - - return store.save() -} - -// Delete deletes a reference from the store. It returns true if a deletion -// happened, or false otherwise. 
-func (store *store) Delete(ref Named) (bool, error) {
-	ref = WithDefaultTag(ref)
-
-	store.mu.Lock()
-	defer store.mu.Unlock()
-
-	repoName := ref.Name()
-
-	repository, exists := store.Repositories[repoName]
-	if !exists {
-		return false, ErrDoesNotExist
-	}
-
-	refStr := ref.String()
-	if id, exists := repository[refStr]; exists {
-		delete(repository, refStr)
-		if len(repository) == 0 {
-			delete(store.Repositories, repoName)
-		}
-		if store.referencesByIDCache[id] != nil {
-			delete(store.referencesByIDCache[id], refStr)
-			if len(store.referencesByIDCache[id]) == 0 {
-				delete(store.referencesByIDCache, id)
-			}
-		}
-		return true, store.save()
-	}
-
-	return false, ErrDoesNotExist
-}
-
-// Get retrieves an item from the store by reference.
-func (store *store) Get(ref Named) (image.ID, error) {
-	ref = WithDefaultTag(ref)
-
-	store.mu.RLock()
-	defer store.mu.RUnlock()
-
-	repository, exists := store.Repositories[ref.Name()]
-	if !exists || repository == nil {
-		return "", ErrDoesNotExist
-	}
-
-	id, exists := repository[ref.String()]
-	if !exists {
-		return "", ErrDoesNotExist
-	}
-
-	return id, nil
-}
-
-// References returns a slice of references to the given image ID. The slice
-// will be nil if there are no references to this image ID.
-func (store *store) References(id image.ID) []Named {
-	store.mu.RLock()
-	defer store.mu.RUnlock()
-
-	// Convert the internal map to an array for two reasons:
-	// 1) We must not return a mutable reference to the internal map
-	// 2) It would be ugly to expose the extraneous map keys to callers.
-
-	var references []Named
-	for _, ref := range store.referencesByIDCache[id] {
-		references = append(references, ref)
-	}
-
-	sort.Sort(lexicalRefs(references))
-
-	return references
-}
-
-// ReferencesByName returns the references for a given repository name.
-// If there are no references known for this repository name,
-// ReferencesByName returns nil.
-func (store *store) ReferencesByName(ref Named) []Association { - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists { - return nil - } - - var associations []Association - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - return nil - } - associations = append(associations, - Association{ - Ref: ref, - ImageID: refID, - }) - } - - sort.Sort(lexicalAssociations(associations)) - - return associations -} - -func (store *store) save() error { - // Store the json - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - - tempFilePath := store.jsonPath + ".tmp" - - if err := ioutil.WriteFile(tempFilePath, jsonData, 0600); err != nil { - return err - } - - if err := os.Rename(tempFilePath, store.jsonPath); err != nil { - return err - } - - return nil -} - -func (store *store) reload() error { - f, err := os.Open(store.jsonPath) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - - for _, repository := range store.Repositories { - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - continue - } - if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]Named) - } - store.referencesByIDCache[refID][refStr] = ref - } - } - - return nil -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/auth.go b/vendor/github.com/hyperhq/hypercli/registry/auth.go deleted file mode 100644 index 0f28ce111..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/auth.go +++ /dev/null @@ -1,255 +0,0 @@ -package registry - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/hyperhq/hyper-api/types" - registrytypes "github.com/hyperhq/hyper-api/types/registry" -) - -// Login tries to register/login to the registry server. -func Login(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { - // Separates the v2 registry login logic from the v1 logic. - if registryEndpoint.Version == APIVersion2 { - return loginV2(authConfig, registryEndpoint, "" /* scope */) - } - return loginV1(authConfig, registryEndpoint) -} - -// loginV1 tries to register/login to the v1 registry server. -func loginV1(authConfig *types.AuthConfig, registryEndpoint *Endpoint) (string, error) { - var ( - status string - respBody []byte - err error - respStatusCode = 0 - serverAddress = authConfig.ServerAddress - ) - - logrus.Debugf("attempting v1 login to registry endpoint %s", registryEndpoint) - - if serverAddress == "" { - return "", fmt.Errorf("Server Error: Server Address not set.") - } - - loginAgainstOfficialIndex := serverAddress == IndexServer - - // to avoid sending the server address to the server it should be removed before being marshaled - authCopy := *authConfig - authCopy.ServerAddress = "" - - jsonBody, err := json.Marshal(authCopy) - if err != nil { - return "", fmt.Errorf("Config Error: %s", err) - } - - // using `bytes.NewReader(jsonBody)` here causes the server to respond with a 411 status. 
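-	// (HTTP 411 is "Length Required": the server refuses a request when it
-	// cannot determine a Content-Length for the body.)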
-	b := strings.NewReader(string(jsonBody))
-	resp1, err := registryEndpoint.client.Post(serverAddress+"users/", "application/json; charset=utf-8", b)
-	if err != nil {
-		return "", fmt.Errorf("Server Error: %s", err)
-	}
-	defer resp1.Body.Close()
-	respStatusCode = resp1.StatusCode
-	respBody, err = ioutil.ReadAll(resp1.Body)
-	if err != nil {
-		return "", fmt.Errorf("Server Error: [%#v] %s", respStatusCode, err)
-	}
-
-	if respStatusCode == 201 {
-		if loginAgainstOfficialIndex {
-			status = "Account created. Please use the confirmation link we sent" +
-				" to your e-mail to activate it."
-		} else {
-			// *TODO: Use registry configuration to determine what this says, if anything?
-			status = "Account created. Please see the documentation of the registry " + serverAddress + " for instructions on how to activate it."
-		}
-	} else if respStatusCode == 400 {
-		if string(respBody) == "\"Username or email already exists\"" {
-			req, err := http.NewRequest("GET", serverAddress+"users/", nil)
-			req.SetBasicAuth(authConfig.Username, authConfig.Password)
-			resp, err := registryEndpoint.client.Do(req)
-			if err != nil {
-				return "", err
-			}
-			defer resp.Body.Close()
-			body, err := ioutil.ReadAll(resp.Body)
-			if err != nil {
-				return "", err
-			}
-			if resp.StatusCode == 200 {
-				return "Login Succeeded", nil
-			} else if resp.StatusCode == 401 {
-				return "", fmt.Errorf("Wrong login/password, please try again")
-			} else if resp.StatusCode == 403 {
-				if loginAgainstOfficialIndex {
-					return "", fmt.Errorf("Login: Account is not Active. Please check your e-mail for a confirmation link.")
-				}
-				// *TODO: Use registry configuration to determine what this says, if anything?
-				return "", fmt.Errorf("Login: Account is not Active. Please see the documentation of the registry %s for instructions on how to activate it.", serverAddress)
-			} else if resp.StatusCode == 500 { // Issue #14326
-				logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body)
-				return "", fmt.Errorf("Internal Server Error")
-			}
-			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body, resp.StatusCode, resp.Header)
-		}
-		return "", fmt.Errorf("Registration: %s", respBody)
-
-	} else if respStatusCode == 401 {
-		// This case would happen with private registries where /v1/users is
-		// protected, so people can use `docker login` as an auth check.
-		req, err := http.NewRequest("GET", serverAddress+"users/", nil)
-		req.SetBasicAuth(authConfig.Username, authConfig.Password)
-		resp, err := registryEndpoint.client.Do(req)
-		if err != nil {
-			return "", err
-		}
-		defer resp.Body.Close()
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return "", err
-		}
-		if resp.StatusCode == 200 {
-			return "Login Succeeded", nil
-		} else if resp.StatusCode == 401 {
-			return "", fmt.Errorf("Wrong login/password, please try again")
-		} else {
-			return "", fmt.Errorf("Login: %s (Code: %d; Headers: %s)", body,
-				resp.StatusCode, resp.Header)
-		}
-	} else {
-		return "", fmt.Errorf("Unexpected status code [%d] : %s", respStatusCode, respBody)
-	}
-	return status, nil
-}
-
-// loginV2 tries to login to the v2 registry server. The given registry endpoint has been
-// pinged or set up with a list of authorization challenges. Each of these challenges is
-// tried until one of them succeeds. Currently supported challenge schemes are:
-//	HTTP Basic Authorization
-//	Token Authorization with a separate token-issuing server
-// NOTE: the v2 logic does not attempt to create a user account if one doesn't exist.
For -// now, users should create their account through other means like directly from a web page -// served by the v2 registry service provider. Whether this will be supported in the future -// is to be determined. -func loginV2(authConfig *types.AuthConfig, registryEndpoint *Endpoint, scope string) (string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", registryEndpoint) - var ( - err error - allErrors []error - ) - - for _, challenge := range registryEndpoint.AuthChallenges { - params := make(map[string]string, len(challenge.Parameters)+1) - for k, v := range challenge.Parameters { - params[k] = v - } - params["scope"] = scope - logrus.Debugf("trying %q auth challenge with params %v", challenge.Scheme, params) - - switch strings.ToLower(challenge.Scheme) { - case "basic": - err = tryV2BasicAuthLogin(authConfig, params, registryEndpoint) - case "bearer": - err = tryV2TokenAuthLogin(authConfig, params, registryEndpoint) - default: - // Unsupported challenge types are explicitly skipped. - err = fmt.Errorf("unsupported auth scheme: %q", challenge.Scheme) - } - - if err == nil { - return "Login Succeeded", nil - } - - logrus.Debugf("error trying auth challenge %q: %s", challenge.Scheme, err) - - allErrors = append(allErrors, err) - } - - return "", fmt.Errorf("no successful auth challenge for %s - errors: %s", registryEndpoint, allErrors) -} - -func tryV2BasicAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { - req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) - if err != nil { - return err - } - - req.SetBasicAuth(authConfig.Username, authConfig.Password) - - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("basic auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - return nil -} - -func tryV2TokenAuthLogin(authConfig *types.AuthConfig, params map[string]string, registryEndpoint *Endpoint) error { - token, err := getToken(authConfig.Username, authConfig.Password, params, registryEndpoint) - if err != nil { - return err - } - - req, err := http.NewRequest("GET", registryEndpoint.Path(""), nil) - if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("token auth attempt to %s realm %q failed with status: %d %s", registryEndpoint, params["realm"], resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - return nil -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - convertToHostname := func(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] - } - - // Maybe they have a legacy config file, we will iterate the keys 
converting
-	// them to the new format and testing
-	for registry, ac := range authConfigs {
-		if configKey == convertToHostname(registry) {
-			return ac
-		}
-	}
-
-	// When all else fails, return an empty auth config
-	return types.AuthConfig{}
-}
diff --git a/vendor/github.com/hyperhq/hypercli/registry/authchallenge.go b/vendor/github.com/hyperhq/hypercli/registry/authchallenge.go
deleted file mode 100644
index e300d82a0..000000000
--- a/vendor/github.com/hyperhq/hypercli/registry/authchallenge.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package registry
-
-import (
-	"net/http"
-	"strings"
-)
-
-// Octet types from RFC 2616.
-type octetType byte
-
-// AuthorizationChallenge carries information
-// from a WWW-Authenticate response header.
-type AuthorizationChallenge struct {
-	Scheme     string
-	Parameters map[string]string
-}
-
-var octetTypes [256]octetType
-
-const (
-	isToken octetType = 1 << iota
-	isSpace
-)
-
-func init() {
-	// OCTET      = <any 8-bit sequence of data>
-	// CHAR       = <any US-ASCII character (octets 0 - 127)>
-	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-	// CR         = <US-ASCII CR, carriage return (13)>
-	// LF         = <US-ASCII LF, linefeed (10)>
-	// SP         = <US-ASCII SP, space (32)>
-	// HT         = <US-ASCII HT, horizontal-tab (9)>
-	// <">        = <US-ASCII double-quote mark (34)>
-	// CRLF       = CR LF
-	// LWS        = [CRLF] 1*( SP | HT )
-	// TEXT       = <any OCTET except CTLs, but including LWS>
-	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
-	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
-	// token      = 1*<any CHAR except CTLs or separators>
-	// qdtext     = <any TEXT except <">>
-
-	for c := 0; c < 256; c++ {
-		var t octetType
-		isCtl := c <= 31 || c == 127
-		isChar := 0 <= c && c <= 127
-		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
-		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
-			t |= isSpace
-		}
-		if isChar && !isCtl && !isSeparator {
-			t |= isToken
-		}
-		octetTypes[c] = t
-	}
-}
-
-func parseAuthHeader(header http.Header) []*AuthorizationChallenge {
-	var challenges []*AuthorizationChallenge
-	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
-		v, p := parseValueAndParams(h)
-		if v != "" {
-			challenges = append(challenges, &AuthorizationChallenge{Scheme: v, Parameters: p})
-		}
-	}
-	return challenges
-}
-
-func parseValueAndParams(header string) (value string, params map[string]string) {
-	params = make(map[string]string)
-	value, s := expectToken(header)
-	if value == "" {
-		return
-	}
-	value = strings.ToLower(value)
-	s = "," + skipSpace(s)
-	for strings.HasPrefix(s, ",") {
-		var pkey string
-		pkey, s = expectToken(skipSpace(s[1:]))
-		if pkey == "" {
-			return
-		}
-		if !strings.HasPrefix(s, "=") {
-			return
-		}
-		var pvalue string
-		pvalue, s = expectTokenOrQuoted(s[1:])
-		if pvalue == "" {
-			return
-		}
-		pkey = strings.ToLower(pkey)
-		params[pkey] = pvalue
-		s = skipSpace(s)
-	}
-	return
-}
-
-func skipSpace(s string) (rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isSpace == 0 {
-			break
-		}
-	}
-	return s[i:]
-}
-
-func expectToken(s string) (token, rest string) {
-	i := 0
-	for ; i < len(s); i++ {
-		if octetTypes[s[i]]&isToken == 0 {
-			break
-		}
-	}
-	return s[:i], s[i:]
-}
-
-func expectTokenOrQuoted(s string) (value string, rest string) {
-	if !strings.HasPrefix(s, "\"") {
-		return expectToken(s)
-	}
-	s = s[1:]
-	for i := 0; i < len(s); i++ {
-		switch s[i] {
-		case '"':
-			return s[:i], s[i+1:]
-		case '\\':
-			p := make([]byte, len(s)-1)
-			j := copy(p, s[:i])
-			escape := true
-			for i = i + 1; i < len(s); i++ {
-				b := s[i]
-				switch {
-				case escape:
-					escape = false
-					p[j] = b
-					j++
-				case b == '\\':
-					escape = true
-				case b == '"':
-					return string(p[:j]), s[i+1:]
-				default:
-					p[j] = b
-					j++
-				}
-			}
-			return "", ""
-		}
-	}
-	return "", ""
}
diff --git a/vendor/github.com/hyperhq/hypercli/registry/config.go
b/vendor/github.com/hyperhq/hypercli/registry/config.go deleted file mode 100644 index f9231fa18..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/config.go +++ /dev/null @@ -1,257 +0,0 @@ -package registry - -import ( - "errors" - "fmt" - "net" - "net/url" - "strings" - - registrytypes "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/hypercli/opts" - flag "github.com/hyperhq/hypercli/pkg/mflag" - "github.com/hyperhq/hypercli/reference" -) - -// Options holds command line options. -type Options struct { - Mirrors opts.ListOpts - InsecureRegistries opts.ListOpts -} - -const ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexServer is the v1 registry server used for user auth + account creation - IndexServer = DefaultV1Registry + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" - - // NotaryServer is the endpoint serving the Notary trust server - NotaryServer = "https://notary.docker.io" - - // IndexServer = "https://registry-stage.hub.docker.com/v1/" -) - -var ( - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig = NewServiceConfig(nil) - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only = false -) - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -func (options *Options) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - options.Mirrors = opts.NewListOpts(ValidateMirror) - cmd.Var(&options.Mirrors, []string{"-registry-mirror"}, usageFn("Preferred Docker registry mirror")) - options.InsecureRegistries = opts.NewListOpts(ValidateIndexName) - cmd.Var(&options.InsecureRegistries, []string{"-insecure-registry"}, usageFn("Enable insecure registry communication")) - cmd.BoolVar(&V2Only, []string{"-disable-legacy-registry"}, false, usageFn("Do not contact legacy registries")) -} - -// NewServiceConfig returns a new instance of ServiceConfig -func NewServiceConfig(options *Options) *registrytypes.ServiceConfig { - if options == nil { - options = &Options{ - Mirrors: opts.NewListOpts(nil), - InsecureRegistries: opts.NewListOpts(nil), - } - } - - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? - options.InsecureRegistries.Set("127.0.0.0/8") - - config := ®istrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo, 0), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - Mirrors: options.Mirrors.GetAll(), - } - // Split --insecure-registry into CIDR and registry-specific settings. 
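-	// For example (values illustrative):
-	//	--insecure-registry 10.0.0.0/8       -> InsecureRegistryCIDRs
-	//	--insecure-registry myregistry:5000  -> IndexConfigs["myregistry:5000"]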
- for _, r := range options.InsecureRegistries.GetAll() { - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, (*registrytypes.NetIPNet)(ipnet)) - } else { - // Assume `host:port` if not CIDR. - config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return config -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *registrytypes.ServiceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - host, _, err := net.SplitHostPort(indexName) - if err != nil { - // assume indexName is of the form `host` without the port and go on. - host = indexName - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. - for _, addr := range addrs { - for _, ipnet := range config.InsecureRegistryCIDRs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return false - } - } - } - - return true -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("%s is not a valid URI", val) - } - - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("Unsupported scheme %s", uri.Scheme) - } - - if uri.Path != "" || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("Unsupported path/query/fragment at end of the URI") - } - - return fmt.Sprintf("%s://%s/", uri.Scheme, uri.Host), nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - if val == reference.LegacyDefaultHostname { - val = reference.DefaultHostname - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("Invalid index name (%s). 
Cannot begin or end with a hyphen.", val) - } - return val, nil -} - -func validateNoSchema(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! - return ErrInvalidRepositoryName - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *registrytypes.ServiceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. -func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *registrytypes.ServiceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, name.Hostname()) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(name.Name(), '/') - return &RepositoryInfo{name, index, official}, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/config_unix.go b/vendor/github.com/hyperhq/hypercli/registry/config_unix.go deleted file mode 100644 index df970181d..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/config_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -package registry - -const ( - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = "https://index.docker.io" - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = "https://registry-1.docker.io" -) - -var ( - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" -) - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/config_windows.go b/vendor/github.com/hyperhq/hypercli/registry/config_windows.go deleted file mode 100644 index d01b2618a..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/config_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package registry - -import ( - "os" - "path/filepath" - "strings" -) - -const ( - // DefaultV1Registry is the URI of the default v1 registry - DefaultV1Registry = "https://registry-win-tp3.docker.io" - - // DefaultV2Registry is the URI of the default (official) v2 registry. - // This is the windows-specific endpoint. - // - // Currently it is a TEMPORARY link that allows Microsoft to continue - // development of Docker Engine for Windows. - DefaultV2Registry = "https://registry-win-tp3.docker.io" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/endpoint.go b/vendor/github.com/hyperhq/hypercli/registry/endpoint.go deleted file mode 100644 index 3b6306a82..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/endpoint.go +++ /dev/null @@ -1,277 +0,0 @@ -package registry - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/hyperhq/hyper-api/types/registry" -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// scans string for api version in the URL path. returns the trimmed address, if version found, string and API version. -func scanForAPIVersion(address string) (string, APIVersion) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - - for k, v := range apiVersions { - if apiVersionStr == v { - address = strings.Join(chunks[:len(chunks)-1], "/") - return address, k - } - } - - return address, APIVersionUnknown -} - -// NewEndpoint parses the given address to return a registry endpoint. 
v can be used to -// specify a specific endpoint version -func NewEndpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header, v APIVersion) (*Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - endpoint, err := newEndpoint(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - if v != APIVersionUnknown { - endpoint.Version = v - } - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newEndpoint(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*Endpoint, error) { - var ( - endpoint = new(Endpoint) - trimmedAddress string - err error - ) - - if !strings.HasPrefix(address, "http") { - address = "https://" + address - } - - endpoint.IsSecure = (tlsConfig == nil || !tlsConfig.InsecureSkipVerify) - - trimmedAddress, endpoint.Version = scanForAPIVersion(address) - - if endpoint.URL, err = url.Parse(trimmedAddress); err != nil { - return nil, err - } - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...)) - return endpoint, nil -} - -// Endpoint stores basic information about a registry endpoint. -type Endpoint struct { - client *http.Client - URL *url.URL - Version APIVersion - IsSecure bool - AuthChallenges []*AuthorizationChallenge - URLBuilder *v2.URLBuilder -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *Endpoint) String() string { - return fmt.Sprintf("%s/v%d/", e.URL, e.Version) -} - -// VersionString returns a formatted string of this -// endpoint address using the given API Version. -func (e *Endpoint) VersionString(version APIVersion) string { - return fmt.Sprintf("%s/v%d/", e.URL, version) -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *Endpoint) Path(path string) string { - return fmt.Sprintf("%s/v%d/%s", e.URL, e.Version, path) -} - -// Ping pings the remote endpoint with v2 and v1 pings to determine the API -// version. 
It returns a PingResult containing the discovered version. The -// PingResult also indicates whether the registry is standalone or not. -func (e *Endpoint) Ping() (PingResult, error) { - // The ping logic to use is determined by the registry endpoint version. - switch e.Version { - case APIVersion1: - return e.pingV1() - case APIVersion2: - return e.pingV2() - } - - // APIVersionUnknown - // We should try v2 first... - e.Version = APIVersion2 - regInfo, errV2 := e.pingV2() - if errV2 == nil { - return regInfo, nil - } - - // ... then fallback to v1. - e.Version = APIVersion1 - regInfo, errV1 := e.pingV1() - if errV1 == nil { - return regInfo, nil - } - - e.Version = APIVersionUnknown - return PingResult{}, fmt.Errorf("unable to ping registry endpoint %s\nv2 ping attempt failed with error: %s\n v1 ping attempt failed with error: %s", e, errV2, errV1) -} - -func (e *Endpoint) pingV1() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest("GET", e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshalling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". - if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} - -func (e *Endpoint) pingV2() (PingResult, error) { - logrus.Debugf("attempting v2 ping for registry endpoint %s", e) - - req, err := http.NewRequest("GET", e.Path(""), nil) - if err != nil { - return PingResult{}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{}, err - } - defer resp.Body.Close() - - // The endpoint may have multiple supported versions. - // Ensure it supports the v2 Registry API. 
- var supportsV2 bool - -HeaderLoop: - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey("Docker-Distribution-API-Version")] { - for _, versionName := range strings.Fields(supportedVersions) { - if versionName == "registry/2.0" { - supportsV2 = true - break HeaderLoop - } - } - } - - if !supportsV2 { - return PingResult{}, fmt.Errorf("%s does not appear to be a v2 registry endpoint", e) - } - - if resp.StatusCode == http.StatusOK { - // It would seem that no authentication/authorization is required. - // So we don't need to parse/add any authorization schemes. - return PingResult{Standalone: true}, nil - } - - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - e.AuthChallenges = parseAuthHeader(resp.Header) - return PingResult{}, nil - } - - return PingResult{}, fmt.Errorf("v2 registry endpoint returned status %d: %q", resp.StatusCode, http.StatusText(resp.StatusCode)) -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/reference.go b/vendor/github.com/hyperhq/hypercli/registry/reference.go deleted file mode 100644 index e15f83eee..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/reference.go +++ /dev/null @@ -1,68 +0,0 @@ -package registry - -import ( - "strings" - - "github.com/docker/distribution/digest" -) - -// Reference represents a tag or digest within a repository -type Reference interface { - // HasDigest returns whether the reference has a verifiable - // content addressable reference which may be considered secure. - HasDigest() bool - - // ImageName returns an image name for the given repository - ImageName(string) string - - // Returns a string representation of the reference - String() string -} - -type tagReference struct { - tag string -} - -func (tr tagReference) HasDigest() bool { - return false -} - -func (tr tagReference) ImageName(repo string) string { - return repo + ":" + tr.tag -} - -func (tr tagReference) String() string { - return tr.tag -} - -type digestReference struct { - digest digest.Digest -} - -func (dr digestReference) HasDigest() bool { - return true -} - -func (dr digestReference) ImageName(repo string) string { - return repo + "@" + dr.String() -} - -func (dr digestReference) String() string { - return dr.digest.String() -} - -// ParseReference parses a reference into either a digest or tag reference -func ParseReference(ref string) Reference { - if strings.Contains(ref, ":") { - dgst, err := digest.ParseDigest(ref) - if err == nil { - return digestReference{digest: dgst} - } - } - return tagReference{tag: ref} -} - -// DigestReference creates a digest reference using a digest -func DigestReference(dgst digest.Digest) Reference { - return digestReference{digest: dgst} -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/registry.go b/vendor/github.com/hyperhq/hypercli/registry/registry.go deleted file mode 100644 index 759a1d95c..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/registry.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. 
-package registry - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -func init() { - if runtime.GOOS != "linux" { - V2Only = true - } -} - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir != "" { - hostDir := filepath.Join(CertsDir, cleanPath(hostname)) - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. -func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - // TODO(dmcgowan): Copy system pool - tlsConfig.RootCAs = x509.NewCertPool() - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("Missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt.", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// DockerHeaders returns request modifiers with a User-Agent and metaHeaders -func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns a HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// ShouldV2Fallback returns true if this error is a reason to fall back to v1. -func ShouldV2Fallback(err errcode.Error) bool { - switch err.Code { - case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: - return true - } - return false -} - -// ErrNoSupport is an error type used for errors indicating that an operation -// is not supported. It encapsulates a more specific error. -type ErrNoSupport struct{ Err error } - -func (e ErrNoSupport) Error() string { - if e.Err == nil { - return "not supported" - } - return e.Err.Error() -} - -// ContinueOnError returns true if we should fallback to the next endpoint -// as a result of this error. -func ContinueOnError(err error) bool { - switch v := err.(type) { - case errcode.Errors: - if len(v) == 0 { - return true - } - return ContinueOnError(v[0]) - case ErrNoSupport: - return ContinueOnError(v.Err) - case errcode.Error: - return ShouldV2Fallback(v) - case *client.UnexpectedHTTPResponseError: - return true - case error: - return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. 
- return true -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. -func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - var cfg = tlsconfig.ServerDefault() - tlsConfig = cfg - } - return &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/service.go b/vendor/github.com/hyperhq/hypercli/registry/service.go deleted file mode 100644 index eae55f120..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/service.go +++ /dev/null @@ -1,188 +0,0 @@ -package registry - -import ( - "crypto/tls" - "net/http" - "net/url" - "strings" - - "github.com/hyperhq/hyper-api/types" - registrytypes "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/hypercli/reference" -) - -// Service is a registry service. It tracks configuration data such as a list -// of mirrors. -type Service struct { - Config *registrytypes.ServiceConfig -} - -// NewService returns a new instance of Service ready to be -// installed into an engine. -func NewService(options *Options) *Service { - return &Service{ - Config: NewServiceConfig(options), - } -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *Service) Auth(authConfig *types.AuthConfig, userAgent string) (string, error) { - addr := authConfig.ServerAddress - if addr == "" { - // Use the official registry address if not specified. - addr = IndexServer - } - index, err := s.ResolveIndex(addr) - if err != nil { - return "", err - } - - endpointVersion := APIVersion(APIVersionUnknown) - if V2Only { - // Override the endpoint to only attempt a v2 ping - endpointVersion = APIVersion2 - } - - endpoint, err := NewEndpoint(index, userAgent, nil, endpointVersion) - if err != nil { - return "", err - } - authConfig.ServerAddress = endpoint.String() - return Login(authConfig, endpoint) -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - var indexName, remoteName string - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Index repos (ex: samalba/hipache or ubuntu) - // 'docker.io' - indexName = IndexName - remoteName = reposName - } else { - indexName = nameParts[0] - remoteName = nameParts[1] - } - return indexName, remoteName -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. -func (s *Service) Search(term string, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - if err := validateNoSchema(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - index, err := newIndexInfo(s.Config, indexName) - if err != nil { - return nil, err - } - - // *TODO: Search multiple indexes. 
-	endpoint, err := NewEndpoint(index, userAgent, http.Header(headers), APIVersionUnknown)
-	if err != nil {
-		return nil, err
-	}
-
-	r, err := NewSession(endpoint.client, authConfig, endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	if index.Official {
-		localName := remoteName
-		if strings.HasPrefix(localName, "library/") {
-			// If pulling "library/foo", it's stored locally under "foo"
-			localName = strings.SplitN(localName, "/", 2)[1]
-		}
-
-		return r.SearchRepositories(localName)
-	}
-	return r.SearchRepositories(remoteName)
-}
-
-// ResolveRepository splits a repository name into its components
-// and configuration of the associated registry.
-func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) {
-	return newRepositoryInfo(s.Config, name)
-}
-
-// ResolveIndex takes indexName and returns index info
-func (s *Service) ResolveIndex(name string) (*registrytypes.IndexInfo, error) {
-	return newIndexInfo(s.Config, name)
-}
-
-// APIEndpoint represents a remote API endpoint
-type APIEndpoint struct {
-	Mirror       bool
-	URL          string
-	Version      APIVersion
-	Official     bool
-	TrimHostname bool
-	TLSConfig    *tls.Config
-}
-
-// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint
-func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) (*Endpoint, error) {
-	return newEndpoint(e.URL, e.TLSConfig, userAgent, metaHeaders)
-}
-
-// TLSConfig constructs a client TLS configuration based on server defaults
-func (s *Service) TLSConfig(hostname string) (*tls.Config, error) {
-	return newTLSConfig(hostname, isSecureIndex(s.Config, hostname))
-}
-
-func (s *Service) tlsConfigForMirror(mirror string) (*tls.Config, error) {
-	mirrorURL, err := url.Parse(mirror)
-	if err != nil {
-		return nil, err
-	}
-	return s.TLSConfig(mirrorURL.Host)
-}
-
-// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference.
-// It gives preference to v2 endpoints over v1, mirrors over the actual
-// registry, and HTTPS over plain HTTP.
-func (s *Service) LookupPullEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
-	return s.lookupEndpoints(repoName)
-}
-
-// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference.
-// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP.
-// Mirrors are not included.
-func (s *Service) LookupPushEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
-	allEndpoints, err := s.lookupEndpoints(repoName)
-	if err == nil {
-		for _, endpoint := range allEndpoints {
-			if !endpoint.Mirror {
-				endpoints = append(endpoints, endpoint)
-			}
-		}
-	}
-	return endpoints, err
-}
-
-func (s *Service) lookupEndpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) {
-	endpoints, err = s.lookupV2Endpoints(repoName)
-	if err != nil {
-		return nil, err
-	}
-
-	if V2Only {
-		return endpoints, nil
-	}
-
-	legacyEndpoints, err := s.lookupV1Endpoints(repoName)
-	if err != nil {
-		return nil, err
-	}
-	endpoints = append(endpoints, legacyEndpoints...)
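// Descriptive note (editorial, not part of the removed source): at this point
// the slice holds v2 endpoints first and v1 endpoints last, which is what
// gives pulls and pushes their v2-over-v1 preference.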
- - return endpoints, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/service_v1.go b/vendor/github.com/hyperhq/hypercli/registry/service_v1.go deleted file mode 100644 index 638acb313..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/service_v1.go +++ /dev/null @@ -1,56 +0,0 @@ -package registry - -import ( - "fmt" - "strings" - - "github.com/docker/go-connections/tlsconfig" - "github.com/hyperhq/hypercli/reference" -) - -func (s *Service) lookupV1Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { - var cfg = tlsconfig.ServerDefault() - tlsConfig := cfg - nameString := repoName.FullName() - if strings.HasPrefix(nameString, DefaultNamespace+"/") { - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV1Registry, - Version: APIVersion1, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - return endpoints, nil - } - - slashIndex := strings.IndexRune(nameString, '/') - if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) - } - hostname := nameString[:slashIndex] - - tlsConfig, err = s.TLSConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: "https://" + hostname, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: "http://" + hostname, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/service_v2.go b/vendor/github.com/hyperhq/hypercli/registry/service_v2.go deleted file mode 100644 index 47077c94c..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/service_v2.go +++ /dev/null @@ -1,74 +0,0 @@ -package registry - -import ( - "fmt" - "strings" - - "github.com/docker/go-connections/tlsconfig" - "github.com/hyperhq/hypercli/reference" -) - -func (s *Service) lookupV2Endpoints(repoName reference.Named) (endpoints []APIEndpoint, err error) { - var cfg = tlsconfig.ServerDefault() - tlsConfig := cfg - nameString := repoName.FullName() - if strings.HasPrefix(nameString, DefaultNamespace+"/") { - // v2 mirrors - for _, mirror := range s.Config.Mirrors { - mirrorTLSConfig, err := s.tlsConfigForMirror(mirror) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirror, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - slashIndex := strings.IndexRune(nameString, '/') - if slashIndex <= 0 { - return nil, fmt.Errorf("invalid repo name: missing '/': %s", nameString) - } - hostname := nameString[:slashIndex] - - tlsConfig, err = s.TLSConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: "https://" + hostname, - Version: APIVersion2, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: "http://" + hostname, - Version: APIVersion2, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} 
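Taken together, the two lookup functions above share one shape: for official images they return the configured mirrors (v2 only) followed by the default registry, and for everything else they derive the registry host from the first path segment of the image name, offering plain HTTP only when certificate verification is disabled for that host. Below is a reduced, runnable sketch of that ordering; candidateEndpoints is an illustrative name rather than part of the removed code, and the real functions also carry an API version and a TLS configuration per endpoint:

    package main

    import (
        "fmt"
        "strings"
    )

    // candidateEndpoints returns registry base URLs in the order a client
    // should try them: HTTPS always comes first, and plain HTTP is offered
    // only for registries explicitly marked insecure.
    func candidateEndpoints(repoName string, insecure bool) []string {
        hostname := strings.SplitN(repoName, "/", 2)[0]
        endpoints := []string{"https://" + hostname}
        if insecure {
            endpoints = append(endpoints, "http://"+hostname)
        }
        return endpoints
    }

    func main() {
        fmt.Println(candidateEndpoints("myregistry.example.com:5000/team/app", true))
        // [https://myregistry.example.com:5000 http://myregistry.example.com:5000]
    }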
diff --git a/vendor/github.com/hyperhq/hypercli/registry/session.go b/vendor/github.com/hyperhq/hypercli/registry/session.go deleted file mode 100644 index 6b2477ab3..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/session.go +++ /dev/null @@ -1,770 +0,0 @@ -package registry - -import ( - "bytes" - "crypto/sha256" - "errors" - "sync" - // this is required for some certificates - _ "crypto/sha512" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/api/errcode" - "github.com/hyperhq/hyper-api/types" - registrytypes "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/hypercli/pkg/httputils" - "github.com/hyperhq/hypercli/pkg/ioutils" - "github.com/hyperhq/hypercli/pkg/stringid" - "github.com/hyperhq/hypercli/pkg/tarsum" - "github.com/hyperhq/hypercli/reference" -) - -var ( - // ErrRepoNotFound is returned if the repository didn't exist on the - // remote side - ErrRepoNotFound = errors.New("Repository not found") -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes a HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. 
- // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - delete(tr.modReq, orig) - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *Endpoint) (r *Session, err error) { - r = &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } - - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.VersionString(1) != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return nil, err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return nil, errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return r, nil -} - -// ID returns this registry session's ID. -func (r *Session) ID() string { - return r.id -} - -// GetRemoteHistory retrieves the history of a given image from the registry. -// It returns a list of the parent's JSON files (including the requested image). 
-func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - var history []string - if err := json.NewDecoder(res.Body).Decode(&history); err != nil { - return nil, fmt.Errorf("Error while reading the http response: %v", err) - } - - logrus.Debugf("Ancestry: %v", history) - return history, nil -} - -// LookupRemoteImage checks if an image exists in the registry -func (r *Session) LookupRemoteImage(imgID, registry string) error { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - return nil -} - -// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. -func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - // if the size header is not present, then set it to '-1' - imageSize := int64(-1) - if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.ParseInt(hdr, 10, 64) - if err != nil { - return nil, -1, err - } - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -// GetRemoteImageLayer retrieves an image layer from the registry -func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { - var ( - statusCode = 0 - res *http.Response - err error - imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) - ) - - req, err := http.NewRequest("GET", imageURL, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - statusCode = 0 - res, err = r.client.Do(req) - if err != nil { - logrus.Debugf("Error contacting registry %s: %v", registry, err) - if res != nil { - if res.Body != nil { - res.Body.Close() - } - statusCode = res.StatusCode - } - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - - if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { - logrus.Debugf("server supports resume") - return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil - } - logrus.Debugf("server doesn't support resume") - return res.Body, nil -} - -// GetRemoteTag retrieves the tag named in the askedTag argument from the given -// repository. It queries each of the registries supplied in the registries -// argument, and returns data from the first one that answers the query -// successfully. 
-func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) - res, err := r.client.Get(endpoint) - if err != nil { - return "", err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return "", ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - var tagID string - if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { - return "", err - } - return tagID, nil - } - return "", fmt.Errorf("Could not reach any registry endpoint") -} - -// GetRemoteTags retrieves all tags from the given repository. It queries each -// of the registries supplied in the registries argument, and returns data from -// the first one that answers the query successfully. It returns a map with -// tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := repositoryRef.RemoteName() - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - res, err := r.client.Get(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return nil, ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - result := make(map[string]string) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - return result, nil - } - return nil, fmt.Errorf("Could not reach any registry endpoint") -} - -func buildEndpointsList(headers []string, indexEp string) ([]string, error) { - var endpoints []string - parsedURL, err := url.Parse(indexEp) - if err != nil { - return nil, err - } - var urlScheme = parsedURL.Scheme - // The registry's URL scheme has to match the Index' - for _, ep := range headers { - epList := strings.Split(ep, ",") - for _, epListElement := range epList { - endpoints = append( - endpoints, - fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) - } - } - return endpoints, nil -} - -// GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.VersionString(1), name.RemoteName()) - - logrus.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := http.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - // check if the error is because of i/o timeout - // and return a non-obtuse error message for users - // "Get 
https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" - // was a top search on the docker user forum - if isTimeout(err) { - return nil, fmt.Errorf("Network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy.", repositoryTarget) - } - return nil, fmt.Errorf("Error while pulling image: %v", err) - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - // TODO: Right now we're ignoring checksums in the response body. - // In the future, we need to use them to check image validity. - if res.StatusCode == 404 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } else if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, name.RemoteName(), errBody), res) - } - - var endpoints []string - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1)) - if err != nil { - return nil, err - } - } else { - // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) - } - - remoteChecksums := []*ImgData{} - if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData, len(remoteChecksums)) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - }, nil -} - -// PushImageChecksumRegistry uploads checksums for an image -func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { - u := registry + "images/" + imgData.ID + "/checksum" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, nil) - if err != nil { - return err - } - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %v", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) - } - return nil -} - -// PushImageJSONRegistry pushes JSON metadata for a local image to the registry -func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { - - u := registry + "images/" + imgData.ID + "/json" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - - 
res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return httputils.NewHTTPRequestError("HTTP code 401, Docker will not send auth headers over HTTP.", res) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) - } - return nil -} - -// PushImageLayerRegistry sends the checksum of an image layer to the registry -func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { - u := registry + "images/" + imgID + "/layer" - - logrus.Debugf("[registry] Calling PUT %s", u) - - tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) - if err != nil { - return "", "", err - } - h := sha256.New() - h.Write(jsonRaw) - h.Write([]byte{'\n'}) - checksumLayer := io.TeeReader(tarsumLayer, h) - - req, err := http.NewRequest("PUT", u, checksumLayer) - if err != nil { - return "", "", err - } - req.Header.Add("Content-Type", "application/octet-stream") - req.ContentLength = -1 - req.TransferEncoding = []string{"chunked"} - res, err := r.client.Do(req) - if err != nil { - return "", "", fmt.Errorf("Failed to upload layer: %v", err) - } - if rc, ok := layer.(io.Closer); ok { - if err := rc.Close(); err != nil { - return "", "", err - } - } - defer res.Body.Close() - - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - return "", "", httputils.NewHTTPRequestError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) - } - - checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) - return tarsumLayer.Sum(jsonRaw), checksumPayload, nil -} - -// PushRegistryTag pushes a tag on the registry. 
-// Remote has the format '<user>/<repo>'
-func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	path := fmt.Sprintf("repositories/%s/tags/%s", remote.RemoteName(), tag)
-
-	req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.ContentLength = int64(len(revision))
-	res, err := r.client.Do(req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return httputils.NewHTTPRequestError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, remote.RemoteName()), res)
-	}
-	return nil
-}
-
-// PushImageJSONIndex uploads an image list to the repository
-func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
-	cleanImgList := []*ImgData{}
-	if validate {
-		for _, elem := range imgList {
-			if elem.Checksum != "" {
-				cleanImgList = append(cleanImgList, elem)
-			}
-		}
-	} else {
-		cleanImgList = imgList
-	}
-
-	imgListJSON, err := json.Marshal(cleanImgList)
-	if err != nil {
-		return nil, err
-	}
-	var suffix string
-	if validate {
-		suffix = "images"
-	}
-	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.VersionString(1), remote.RemoteName(), suffix)
-	logrus.Debugf("[registry] PUT %s", u)
-	logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
-	headers := map[string][]string{
-		"Content-type": {"application/json"},
-		// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
-		"X-Docker-Token": {"true"},
-	}
-	if validate {
-		headers["X-Docker-Endpoints"] = regs
-	}
-
-	// Redirect if necessary
-	var res *http.Response
-	for {
-		if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
-			return nil, err
-		}
-		if !shouldRedirect(res) {
-			break
-		}
-		res.Body.Close()
-		u = res.Header.Get("Location")
-		logrus.Debugf("Redirected to %s", u)
-	}
-	defer res.Body.Close()
-
-	if res.StatusCode == 401 {
-		return nil, errcode.ErrorCodeUnauthorized.WithArgs()
-	}
-
-	var tokens, endpoints []string
-	if !validate {
-		if res.StatusCode != 200 && res.StatusCode != 201 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, remote.RemoteName(), errBody), res)
-		}
-		tokens = res.Header["X-Docker-Token"]
-		logrus.Debugf("Auth token: %v", tokens)
-
-		if res.Header.Get("X-Docker-Endpoints") == "" {
-			return nil, fmt.Errorf("Index response didn't contain any endpoints")
-		}
-		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.VersionString(1))
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		if res.StatusCode != 204 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, remote.RemoteName(), errBody), res)
-		}
-	}
-
-	return &RepositoryData{
-		Endpoints: endpoints,
-	}, nil
-}
-
-func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
-	req, err := http.NewRequest("PUT", u, 
bytes.NewReader(body)) - if err != nil { - return nil, err - } - req.ContentLength = int64(len(body)) - for k, v := range headers { - req.Header[k] = v - } - response, err := r.client.Do(req) - if err != nil { - return nil, err - } - return response, nil -} - -func shouldRedirect(response *http.Response) bool { - return response.StatusCode >= 300 && response.StatusCode < 400 -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string) (*registrytypes.SearchResults, error) { - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.VersionString(1) + "search?q=" + url.QueryEscape(term) - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, httputils.NewHTTPRequestError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) - } - result := new(registrytypes.SearchResults) - return result, json.NewDecoder(res.Body).Decode(result) -} - -// GetAuthConfig returns the authentication settings for a session -// TODO(tiborvass): remove this once registry client v2 is vendored -func (r *Session) GetAuthConfig(withPasswd bool) *types.AuthConfig { - password := "" - if withPasswd { - password = r.authConfig.Password - } - return &types.AuthConfig{ - Username: r.authConfig.Username, - Password: password, - Email: r.authConfig.Email, - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/token.go b/vendor/github.com/hyperhq/hypercli/registry/token.go deleted file mode 100644 index d91bd4550..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/token.go +++ /dev/null @@ -1,81 +0,0 @@ -package registry - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -type tokenResponse struct { - Token string `json:"token"` -} - -func getToken(username, password string, params map[string]string, registryEndpoint *Endpoint) (string, error) { - realm, ok := params["realm"] - if !ok { - return "", errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return "", fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - if realmURL.Scheme == "" { - if registryEndpoint.IsSecure { - realmURL.Scheme = "https" - } else { - realmURL.Scheme = "http" - } - } - - req, err := http.NewRequest("GET", realmURL.String(), nil) - if err != nil { - return "", err - } - - reqParams := req.URL.Query() - service := params["service"] - scope := params["scope"] - - if service != "" { - reqParams.Add("service", service) - } - - for _, scopeField := range strings.Fields(scope) { - reqParams.Add("scope", scopeField) - } - - if username != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := registryEndpoint.client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("token auth attempt for registry %s: %s request 
failed with status: %d %s", registryEndpoint, req.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - decoder := json.NewDecoder(resp.Body) - - tr := new(tokenResponse) - if err = decoder.Decode(tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.Token == "" { - return "", errors.New("authorization server did not include a token in the response") - } - - return tr.Token, nil -} diff --git a/vendor/github.com/hyperhq/hypercli/registry/types.go b/vendor/github.com/hyperhq/hypercli/registry/types.go deleted file mode 100644 index ff6ad92c1..000000000 --- a/vendor/github.com/hyperhq/hypercli/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry - -import ( - registrytypes "github.com/hyperhq/hyper-api/types/registry" - "github.com/hyperhq/hypercli/reference" -) - -// RepositoryData tracks the image list, list of endpoints, and list of tokens -// for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string - // Tokens is currently unused (remove it?) - Tokens []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. -type PingResult struct { - // Version is the registry version supplied by the registry in a HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -var apiVersions = map[APIVersion]string{ - 1: "v1", - 2: "v2", -} - -// API Version identifiers. -const ( - APIVersionUnknown = iota - APIVersion1 - APIVersion2 -) - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. 
-	Official bool
-}
diff --git a/vendor/github.com/hyperhq/libcompose/config/convert.go b/vendor/github.com/hyperhq/libcompose/config/convert.go
deleted file mode 100644
index b8ca36968..000000000
--- a/vendor/github.com/hyperhq/libcompose/config/convert.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package config
-
-import "github.com/hyperhq/libcompose/utils"
-
-// ConvertV1toV2 converts a v1 service config to a v2 service config
-func ConvertV1toV2(v1Services map[string]*ServiceConfigV1, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup) (map[string]*ServiceConfig, error) {
-	v2Services := make(map[string]*ServiceConfig)
-
-	/*
-		builds := make(map[string]Build)
-		logs := make(map[string]Log)
-
-		for name, service := range v1Services {
-			builds[name] = Build{
-				Context:    service.Build,
-				Dockerfile: service.Dockerfile,
-			}
-
-			v1Services[name].Build = ""
-			v1Services[name].Dockerfile = ""
-
-			logs[name] = Log{
-				Driver:  service.LogDriver,
-				Options: service.LogOpt,
-			}
-
-			v1Services[name].LogDriver = ""
-			v1Services[name].LogOpt = nil
-		}
-	*/
-
-	if err := utils.Convert(v1Services, &v2Services); err != nil {
-		return nil, err
-	}
-
-	/*
-		for name := range v2Services {
-			v2Services[name].Build = builds[name]
-		}
-	*/
-
-	return v2Services, nil
-}
diff --git a/vendor/github.com/hyperhq/libcompose/config/hash.go b/vendor/github.com/hyperhq/libcompose/config/hash.go
deleted file mode 100644
index 08a114b87..000000000
--- a/vendor/github.com/hyperhq/libcompose/config/hash.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package config
-
-import (
-	"crypto/sha1"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"reflect"
-	"sort"
-
-	"github.com/hyperhq/libcompose/yaml"
-)
-
-// GetServiceHash computes and returns a hash that will identify a service.
-// This hash will then be used to detect whether the service definition/configuration
-// has changed and the service needs to be recreated.
-func GetServiceHash(name string, config *ServiceConfig) string { - hash := sha1.New() - - io.WriteString(hash, name) - - //Get values of Service through reflection - val := reflect.ValueOf(config).Elem() - - //Create slice to sort the keys in Service Config, which allow constant hash ordering - serviceKeys := []string{} - - //Create a data structure of map of values keyed by a string - unsortedKeyValue := make(map[string]interface{}) - - //Get all keys and values in Service Configuration - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - keyField := val.Type().Field(i) - - serviceKeys = append(serviceKeys, keyField.Name) - unsortedKeyValue[keyField.Name] = valueField.Interface() - } - - //Sort serviceKeys alphabetically - sort.Strings(serviceKeys) - - //Go through keys and write hash - for _, serviceKey := range serviceKeys { - serviceValue := unsortedKeyValue[serviceKey] - - io.WriteString(hash, fmt.Sprintf("\n %v: ", serviceKey)) - - switch s := serviceValue.(type) { - case yaml.SliceorMap: - sliceKeys := []string{} - for lkey := range s { - sliceKeys = append(sliceKeys, lkey) - } - sort.Strings(sliceKeys) - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s=%v, ", sliceKey, s[sliceKey])) - } - case yaml.MaporEqualSlice: - for _, sliceKey := range s { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case yaml.MaporColonSlice: - for _, sliceKey := range s { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case yaml.MaporSpaceSlice: - for _, sliceKey := range s { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case yaml.Command: - for _, sliceKey := range s { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case yaml.Stringorslice: - sort.Strings(s) - - for _, sliceKey := range s { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - case []string: - sliceKeys := s - sort.Strings(sliceKeys) - - for _, sliceKey := range sliceKeys { - io.WriteString(hash, fmt.Sprintf("%s, ", sliceKey)) - } - default: - io.WriteString(hash, fmt.Sprintf("%v", serviceValue)) - } - } - - return hex.EncodeToString(hash.Sum(nil)) -} diff --git a/vendor/github.com/hyperhq/libcompose/config/interpolation.go b/vendor/github.com/hyperhq/libcompose/config/interpolation.go deleted file mode 100644 index fc420de9a..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/interpolation.go +++ /dev/null @@ -1,169 +0,0 @@ -package config - -import ( - "bytes" - "fmt" - "strings" - - "github.com/Sirupsen/logrus" -) - -func isNum(c uint8) bool { - return c >= '0' && c <= '9' -} - -func validVariableNameChar(c uint8) bool { - return c == '_' || - c >= 'A' && c <= 'Z' || - c >= 'a' && c <= 'z' || - isNum(c) -} - -func parseVariable(line string, pos int, mapping func(string) string) (string, int, bool) { - var buffer bytes.Buffer - - for ; pos < len(line); pos++ { - c := line[pos] - - switch { - case validVariableNameChar(c): - buffer.WriteByte(c) - default: - return mapping(buffer.String()), pos - 1, true - } - } - - return mapping(buffer.String()), pos, true -} - -func parseVariableWithBraces(line string, pos int, mapping func(string) string) (string, int, bool) { - var buffer bytes.Buffer - - for ; pos < len(line); pos++ { - c := line[pos] - - switch { - case c == '}': - bufferString := buffer.String() - - if bufferString == "" { - return "", 0, false - } - - return mapping(buffer.String()), pos, true - case validVariableNameChar(c): - buffer.WriteByte(c) - default: - return "", 0, false - } - } - - return "", 0, false -} - 
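// Illustrative examples (editorial, not part of the removed file) of the
// grammar accepted by the two parsers above and the dispatcher below:
//
//   "${REGISTRY}/app"  -> braces form, handled by parseVariableWithBraces
//   "$TAG"             -> bare form, handled by parseVariable
//   "$$"               -> a literal "$", handled by the dispatcher itself
//   "${}" or "${-x}"   -> rejected; parseLine reports failure for the line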
-func parseInterpolationExpression(line string, pos int, mapping func(string) string) (string, int, bool) { - c := line[pos] - - switch { - case c == '$': - return "$", pos, true - case c == '{': - return parseVariableWithBraces(line, pos+1, mapping) - case !isNum(c) && validVariableNameChar(c): - // Variables can't start with a number - return parseVariable(line, pos, mapping) - default: - return "", 0, false - } -} - -func parseLine(line string, mapping func(string) string) (string, bool) { - var buffer bytes.Buffer - - for pos := 0; pos < len(line); pos++ { - c := line[pos] - switch { - case c == '$': - var replaced string - var success bool - - replaced, pos, success = parseInterpolationExpression(line, pos+1, mapping) - - if !success { - return "", false - } - - buffer.WriteString(replaced) - default: - buffer.WriteByte(c) - } - } - - return buffer.String(), true -} - -func parseConfig(option, service string, data *interface{}, mapping func(string) string) error { - switch typedData := (*data).(type) { - case string: - var success bool - - *data, success = parseLine(typedData, mapping) - - if !success { - return fmt.Errorf("Invalid interpolation format for \"%s\" option in service \"%s\": \"%s\"", option, service, typedData) - } - case []interface{}: - for k, v := range typedData { - err := parseConfig(option, service, &v, mapping) - - if err != nil { - return err - } - - typedData[k] = v - } - case map[interface{}]interface{}: - for k, v := range typedData { - err := parseConfig(option, service, &v, mapping) - - if err != nil { - return err - } - - typedData[k] = v - } - } - - return nil -} - -// Interpolate replaces variables in the raw map representation of the project file -func Interpolate(environmentLookup EnvironmentLookup, config *RawServiceMap) error { - for k, v := range *config { - for k2, v2 := range v { - err := parseConfig(k2, k, &v2, func(s string) string { - values := environmentLookup.Lookup(s, k, nil) - - if len(values) == 0 { - logrus.Warnf("The %s variable is not set. 
Substituting a blank string.", s) - return "" - } - - // Use first result if many are given - value := values[0] - - // Environment variables come in key=value format - // Return everything past first '=' - return strings.SplitN(value, "=", 2)[1] - }) - - if err != nil { - return err - } - - (*config)[k][k2] = v2 - } - } - - return nil -} diff --git a/vendor/github.com/hyperhq/libcompose/config/merge.go b/vendor/github.com/hyperhq/libcompose/config/merge.go deleted file mode 100644 index 5ca228420..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/merge.go +++ /dev/null @@ -1,150 +0,0 @@ -package config - -import ( - "bufio" - "bytes" - "fmt" - "strings" - - yaml "github.com/cloudfoundry-incubator/candiedyaml" - "github.com/hyperhq/hypercli/pkg/urlutil" -) - -var ( - noMerge = []string{ - "links", - "volumes_from", - } -) - -// Merge merges a compose file into an existing set of service configs -func Merge(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte) (map[string]*ServiceConfig, map[string]*VolumeConfig, map[string]*NetworkConfig, error) { - var config Config - if err := yaml.Unmarshal(bytes, &config); err != nil { - return nil, nil, nil, err - } - - var serviceConfigs map[string]*ServiceConfig - var volumeConfigs map[string]*VolumeConfig - var networkConfigs map[string]*NetworkConfig - if config.Version == "2" { - var err error - serviceConfigs, err = MergeServicesV2(existingServices, environmentLookup, resourceLookup, file, bytes) - if err != nil { - return nil, nil, nil, err - } - volumeConfigs, err = ParseVolumes(environmentLookup, resourceLookup, file, bytes) - if err != nil { - return nil, nil, nil, err - } - networkConfigs, err = ParseNetworks(environmentLookup, resourceLookup, file, bytes) - if err != nil { - return nil, nil, nil, err - } - } else { - serviceConfigsV1, err := MergeServicesV1(existingServices, environmentLookup, resourceLookup, file, bytes) - if err != nil { - return nil, nil, nil, err - } - serviceConfigs, err = ConvertV1toV2(serviceConfigsV1, environmentLookup, resourceLookup) - if err != nil { - return nil, nil, nil, err - } - } - - adjustValues(serviceConfigs) - - return serviceConfigs, volumeConfigs, networkConfigs, nil -} - -func adjustValues(configs map[string]*ServiceConfig) { - // yaml parser turns "no" into "false" but that is not valid for a restart policy - for _, v := range configs { - if v.Restart == "false" { - v.Restart = "no" - } - } -} - -func readEnvFile(resourceLookup ResourceLookup, inFile string, serviceData RawService) (RawService, error) { - if _, ok := serviceData["env_file"]; !ok { - return serviceData, nil - } - envFiles := serviceData["env_file"].([]interface{}) - if len(envFiles) == 0 { - return serviceData, nil - } - - if resourceLookup == nil { - return nil, fmt.Errorf("Can not use env_file in file %s no mechanism provided to load files", inFile) - } - - var vars []interface{} - if _, ok := serviceData["environment"]; ok { - vars = serviceData["environment"].([]interface{}) - } - - for i := len(envFiles) - 1; i >= 0; i-- { - envFile := envFiles[i].(string) - content, _, err := resourceLookup.Lookup(envFile, inFile) - if err != nil { - return nil, err - } - - if err != nil { - return nil, err - } - - scanner := bufio.NewScanner(bytes.NewBuffer(content)) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - key := strings.SplitAfter(line, "=")[0] - - found := false - for _, v := range vars { - if strings.HasPrefix(v.(string), key) 
diff --git a/vendor/github.com/hyperhq/libcompose/config/merge_v1.go b/vendor/github.com/hyperhq/libcompose/config/merge_v1.go deleted file mode 100644 index 0aebb05bc..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/merge_v1.go +++ /dev/null @@ -1,173 +0,0 @@ -package config - -import ( - "fmt" - "path" - - "github.com/Sirupsen/logrus" - yaml "github.com/cloudfoundry-incubator/candiedyaml" - "github.com/hyperhq/libcompose/utils" -) - -// MergeServicesV1 merges a v1 compose file into an existing set of service configs -func MergeServicesV1(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte) (map[string]*ServiceConfigV1, error) { - datas := make(RawServiceMap) - if err := yaml.Unmarshal(bytes, &datas); err != nil { - return nil, err - } - - if err := Interpolate(environmentLookup, &datas); err != nil { - return nil, err - } - - if err := validate(datas, "v1"); err != nil { - return nil, err - } - - for name, data := range datas { - data, err := parseV1(resourceLookup, environmentLookup, file, data, datas) - if err != nil { - logrus.Errorf("Failed to parse service %s: %v", name, err) - return nil, err - } - - if serviceConfig, ok := existingServices.Get(name); ok { - var rawExistingService RawService - if err := utils.Convert(serviceConfig, &rawExistingService); err != nil { - return nil, err - } - - data = mergeConfig(rawExistingService, data) - } - - datas[name] = data - } - - for name, data := range datas { - err := validateServiceConstraints(data, name) - if err != nil { - return nil, err - } - } - - serviceConfigs := make(map[string]*ServiceConfigV1) - if err := utils.Convert(datas, &serviceConfigs); err != nil { - return nil, err - } - - return serviceConfigs, nil -} - -func parseV1(resourceLookup ResourceLookup, environmentLookup EnvironmentLookup, inFile string, serviceData RawService, datas RawServiceMap) (RawService, error) { - serviceData, err := readEnvFile(resourceLookup, inFile, serviceData) - if err != nil { - return nil, err - } - - //serviceData = resolveContextV1(inFile, serviceData) - - value, ok := serviceData["extends"] - if !ok { - return serviceData, nil - } - - mapValue, ok := value.(map[interface{}]interface{}) - if !ok { - return serviceData, nil - } - - if resourceLookup == nil { - return nil, fmt.Errorf("Cannot use extends in file %s: no mechanism provided to load files", inFile) - } - - file := asString(mapValue["file"]) - service := asString(mapValue["service"]) - - if service == "" { - return serviceData, nil - } - - var baseService RawService - - if file == "" { - if serviceData, ok := datas[service]; ok { - baseService, err =
parseV1(resourceLookup, environmentLookup, inFile, serviceData, datas) - } else { - return nil, fmt.Errorf("Failed to find service %s to extend", service) - } - } else { - bytes, resolved, err := resourceLookup.Lookup(file, inFile) - if err != nil { - logrus.Errorf("Failed to lookup file %s: %v", file, err) - return nil, err - } - - var baseRawServices RawServiceMap - if err := yaml.Unmarshal(bytes, &baseRawServices); err != nil { - return nil, err - } - - err = Interpolate(environmentLookup, &baseRawServices) - if err != nil { - return nil, err - } - - if err := validate(baseRawServices, "v1"); err != nil { - return nil, err - } - - baseService, ok = baseRawServices[service] - if !ok { - return nil, fmt.Errorf("Failed to find service %s in file %s", service, file) - } - - baseService, err = parseV1(resourceLookup, environmentLookup, resolved, baseService, baseRawServices) - } - - if err != nil { - return nil, err - } - - baseService = clone(baseService) - - logrus.Debugf("Merging %#v, %#v", baseService, serviceData) - - for _, k := range noMerge { - if _, ok := baseService[k]; ok { - source := file - if source == "" { - source = inFile - } - return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) - } - } - - baseService = mergeConfig(baseService, serviceData) - - logrus.Debugf("Merged result %#v", baseService) - - return baseService, nil -} - -func resolveContextV1(inFile string, serviceData RawService) RawService { - context := asString(serviceData["build"]) - if context == "" { - return serviceData - } - - if IsValidRemote(context) { - return serviceData - } - - current := path.Dir(inFile) - - if context == "." { - context = current - } else { - current = path.Join(current, context) - } - - serviceData["build"] = current - - return serviceData -} diff --git a/vendor/github.com/hyperhq/libcompose/config/merge_v2.go b/vendor/github.com/hyperhq/libcompose/config/merge_v2.go deleted file mode 100644 index fd565c805..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/merge_v2.go +++ /dev/null @@ -1,211 +0,0 @@ -package config - -import ( - "fmt" - "path" - - "github.com/Sirupsen/logrus" - yaml "github.com/cloudfoundry-incubator/candiedyaml" - "github.com/hyperhq/libcompose/utils" -) - -// MergeServicesV2 merges a v2 compose file into an existing set of service configs -func MergeServicesV2(existingServices *ServiceConfigs, environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte) (map[string]*ServiceConfig, error) { - var config Config - if err := yaml.Unmarshal(bytes, &config); err != nil { - return nil, err - } - - datas := config.Services - - if err := Interpolate(environmentLookup, &datas); err != nil { - return nil, err - } - /* - data := RawServiceMap{ - "services": map[string]interface{}{}, - } - for k, v := range datas { - data["services"][k] = v - } - */ - if err := validate(datas, "v2"); err != nil { - return nil, err - } - for name, data := range datas { - data, err := parseV2(resourceLookup, environmentLookup, file, data, datas) - if err != nil { - logrus.Errorf("Failed to parse service %s: %v", name, err) - return nil, err - } - - if serviceConfig, ok := existingServices.Get(name); ok { - var rawExistingService RawService - if err := utils.Convert(serviceConfig, &rawExistingService); err != nil { - return nil, err - } - - data = mergeConfig(rawExistingService, data) - } - - datas[name] = data - } - - serviceConfigs := make(map[string]*ServiceConfig) - if err := 
utils.Convert(datas, &serviceConfigs); err != nil { - return nil, err - } - - return serviceConfigs, nil -} - -// ParseVolumes parses volumes in a compose file -func ParseVolumes(environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte) (map[string]*VolumeConfig, error) { - volumeConfigs := make(map[string]*VolumeConfig) - - var config Config - if err := yaml.Unmarshal(bytes, &config); err != nil { - return nil, err - } - - if err := utils.Convert(config.Volumes, &volumeConfigs); err != nil { - return nil, err - } - - return volumeConfigs, nil -} - -// ParseNetworks parses networks in a compose file -func ParseNetworks(environmentLookup EnvironmentLookup, resourceLookup ResourceLookup, file string, bytes []byte) (map[string]*NetworkConfig, error) { - networkConfigs := make(map[string]*NetworkConfig) - - var config Config - if err := yaml.Unmarshal(bytes, &config); err != nil { - return nil, err - } - - if err := utils.Convert(config.Networks, &networkConfigs); err != nil { - return nil, err - } - - return networkConfigs, nil -} - -func parseV2(resourceLookup ResourceLookup, environmentLookup EnvironmentLookup, inFile string, serviceData RawService, datas RawServiceMap) (RawService, error) { - serviceData, err := readEnvFile(resourceLookup, inFile, serviceData) - if err != nil { - return nil, err - } - - //serviceData = resolveContextV2(inFile, serviceData) - - value, ok := serviceData["extends"] - if !ok { - return serviceData, nil - } - - mapValue, ok := value.(map[interface{}]interface{}) - if !ok { - return serviceData, nil - } - - if resourceLookup == nil { - return nil, fmt.Errorf("Cannot use extends in file %s: no mechanism provided to load files", inFile) - } - - file := asString(mapValue["file"]) - service := asString(mapValue["service"]) - - if service == "" { - return serviceData, nil - } - - var baseService RawService - - if file == "" { - if serviceData, ok := datas[service]; ok { - baseService, err = parseV2(resourceLookup, environmentLookup, inFile, serviceData, datas) - } else { - return nil, fmt.Errorf("Failed to find service %s to extend", service) - } - } else { - bytes, resolved, err := resourceLookup.Lookup(file, inFile) - if err != nil { - logrus.Errorf("Failed to look up file %s: %v", file, err) - return nil, err - } - - var config Config - if err := yaml.Unmarshal(bytes, &config); err != nil { - return nil, err - } - - baseRawServices := config.Services - - err = Interpolate(environmentLookup, &baseRawServices) - if err != nil { - return nil, err - } - if err = validate(baseRawServices, "v2"); err != nil { - return nil, err - } - - baseService, ok = baseRawServices[service] - if !ok { - return nil, fmt.Errorf("Failed to find service %s in file %s", service, file) - } - - baseService, err = parseV2(resourceLookup, environmentLookup, resolved, baseService, baseRawServices) - } - - if err != nil { - return nil, err - } - - baseService = clone(baseService) - - logrus.Debugf("Merging %#v, %#v", baseService, serviceData) - - for _, k := range noMerge { - if _, ok := baseService[k]; ok { - source := file - if source == "" { - source = inFile - } - return nil, fmt.Errorf("Cannot extend service '%s' in %s: services with '%s' cannot be extended", service, source, k) - } - } - - baseService = mergeConfig(baseService, serviceData) - - logrus.Debugf("Merged result %#v", baseService) - - return baseService, nil -} - -func resolveContextV2(inFile string, serviceData RawService) RawService { - if _, ok := serviceData["build"]; !ok { - return serviceData - } -
build := serviceData["build"].(map[interface{}]interface{}) - context := asString(build["context"]) - if context == "" { - return serviceData - } - - if IsValidRemote(context) { - return serviceData - } - - current := path.Dir(inFile) - - if context == "." { - context = current - } else { - current = path.Join(current, context) - } - - build["context"] = current - - return serviceData -} diff --git a/vendor/github.com/hyperhq/libcompose/config/schema.go b/vendor/github.com/hyperhq/libcompose/config/schema.go deleted file mode 100644 index bffd56d0e..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/schema.go +++ /dev/null @@ -1,339 +0,0 @@ -package config - -var schemaV1 = `{ - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "config_schema_v1.json", - - "type": "object", - - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/service" - } - }, - - "additionalProperties": false, - - "definitions": { - "service": { - "id": "#/definitions/service", - "type": "object", - - "properties": { - "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cgroup_parent": {"type": "string"}, - "command": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "container_name": {"type": "string"}, - "cpu_shares": {"type": ["number", "string"]}, - "cpu_quota": {"type": ["number", "string"]}, - "cpuset": {"type": "string"}, - "dns": {"$ref": "#/definitions/string_or_list"}, - "dns_search": {"$ref": "#/definitions/string_or_list"}, - "domainname": {"type": "string"}, - "entrypoint": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "env_file": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "environment": {"$ref": "#/definitions/list_or_dict"}, - - "extends": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - - "properties": { - "service": {"type": "string"}, - "file": {"type": "string"} - }, - "required": ["service"], - "additionalProperties": false - } - ] - }, - - "expose": { - "type": "array", - "items": { - "type": ["string", "number"] - }, - "uniqueItems": true - }, - - "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "hostname": {"type": "string"}, - "image": {"type": "string"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "mem_limit": {"type": ["number", "string"]}, - "memswap_limit": {"type": ["number", "string"]}, - "noauto_volume": {"type": "boolean"}, - - "ports": { - "type": "array", - "items": { - "type": ["string", "number"] - }, - "uniqueItems": true - }, - - "stop_signal": {"type": "string"}, - "security_groups": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "restart": {"type": "string"}, - "stdin_open": {"type": "boolean"}, - "tty": {"type": "boolean"}, - "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "working_dir": {"type": "string"}, - "user": {"type": "string"}, - - "size": {"type": "string"}, - "fip": {"type": "string"} - }, - - "dependencies": { - }, - "additionalProperties": false - }, - - "string_or_list": { - "oneOf": [ - {"type": "string"}, - {"$ref": "#/definitions/list_of_strings"} - ] - }, - - "list_of_strings": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": true - }, - - "list_or_dict": { - 
"oneOf": [ - { - "type": "object", - "patternProperties": { - ".+": { - "type": ["string", "number", "null"] - } - }, - "additionalProperties": false - }, - {"type": "array", "items": {"type": "string"}, "uniqueItems": true} - ] - }, - - "constraints": { - "service": { - "id": "#/definitions/constraints/service", - "anyOf": [ - { - "required": ["image"] - } - ] - } - } - } -} -` - -var schemaV2 = `{ - "$schema": "http://json-schema.org/draft-04/schema#", - "id": "config_schema_v2.0.json", - "type": "object", - - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "$ref": "#/definitions/service" - } - }, - - "additionalProperties": false, - - "definitions": { - "service": { - "id": "#/definitions/service", - "type": "object", - "properties": { - "cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "cgroup_parent": {"type": "string"}, - "command": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "container_name": {"type": "string"}, - "cpu_shares": {"type": ["number", "string"]}, - "cpu_quota": {"type": ["number", "string"]}, - "cpuset": {"type": "string"}, - "depends_on": {"$ref": "#/definitions/list_of_strings"}, - "dns": {"$ref": "#/definitions/string_or_list"}, - "dns_search": {"$ref": "#/definitions/string_or_list"}, - "domainname": {"type": "string"}, - "entrypoint": { - "oneOf": [ - {"type": "string"}, - {"type": "array", "items": {"type": "string"}} - ] - }, - "env_file": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "environment": {"$ref": "#/definitions/list_or_dict"}, - "extends": { - "oneOf": [ - { - "type": "string" - }, - { - "type": "object", - - "properties": { - "service": {"type": "string"}, - "file": {"type": "string"} - }, - "required": ["service"], - "additionalProperties": false - } - ] - }, - - "expose": { - "type": "array", - "items": { - "type": ["string", "number"] - }, - "uniqueItems": true - }, - - "external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "hostname": {"type": "string"}, - "image": {"type": "string"}, - "labels": {"$ref": "#/definitions/list_or_dict"}, - "links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "mem_limit": {"type": ["number", "string"]}, - "memswap_limit": {"type": ["number", "string"]}, - "network_mode": {"type": "string"}, - "noauto_volume": {"type": "boolean"}, - - "networks": { - "oneOf": [ - {"$ref": "#/definitions/list_of_strings"}, - { - "type": "object", - "patternProperties": { - "^[a-zA-Z0-9._-]+$": { - "oneOf": [ - { - "type": "object", - "properties": { - "aliases": {"$ref": "#/definitions/list_of_strings"}, - "ipv4_address": {"type": "string"}, - "ipv6_address": {"type": "string"} - }, - "additionalProperties": false - }, - {"type": "null"} - ] - } - }, - "additionalProperties": false - } - ] - }, - - "ports": { - "type": "array", - "items": { - "type": ["string", "number"] - }, - "uniqueItems": true - }, - - "restart": {"type": "string"}, - "stdin_open": {"type": "boolean"}, - "stop_signal": {"type": "string"}, - "security_groups": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "tty": {"type": "boolean"}, - "user": {"type": "string"}, - "volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true}, - "working_dir": {"type": "string"}, - - "size": {"type": "string"}, - "fip": {"type": "string"} - }, - - "additionalProperties": false - }, - - 
"volume": { - "id": "#/definitions/volume", - "type": ["object", "null"], - "properties": { - "driver_opts": { - "type": "object", - "patternProperties": { - "^.+$": {"type": ["string", "number"]} - } - }, - "external": { - "type": ["boolean", "object"], - "properties": { - "name": {"type": "string"} - } - }, - "additionalProperties": false - }, - "additionalProperties": false - }, - - "string_or_list": { - "oneOf": [ - {"type": "string"}, - {"$ref": "#/definitions/list_of_strings"} - ] - }, - - "list_of_strings": { - "type": "array", - "items": {"type": "string"}, - "uniqueItems": true - }, - - "list_or_dict": { - "oneOf": [ - { - "type": "object", - "patternProperties": { - ".+": { - "type": ["string", "number", "null"] - } - }, - "additionalProperties": false - }, - {"type": "array", "items": {"type": "string"}, "uniqueItems": true} - ] - }, - - "constraints": { - "service": { - "id": "#/definitions/constraints/service", - "anyOf": [ - {"required": ["image"]} - ] - } - } - } -} -` diff --git a/vendor/github.com/hyperhq/libcompose/config/schema_helpers.go b/vendor/github.com/hyperhq/libcompose/config/schema_helpers.go deleted file mode 100644 index 43de4fdb4..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/schema_helpers.go +++ /dev/null @@ -1,96 +0,0 @@ -package config - -import ( - "encoding/json" - "strings" - - "github.com/docker/go-connections/nat" - "github.com/xeipuuv/gojsonschema" -) - -var ( - schemaLoader gojsonschema.JSONLoader - constraintSchemaLoader gojsonschema.JSONLoader - schema map[string]interface{} -) - -type ( - environmentFormatChecker struct{} - portsFormatChecker struct{} -) - -func (checker environmentFormatChecker) IsFormat(input string) bool { - // If the value is a boolean, a warning should be given - // However, we can't determine type since gojsonschema converts the value to a string - // Adding a function with an interface{} parameter to gojsonschema is probably the best way to handle this - return true -} - -func (checker portsFormatChecker) IsFormat(input string) bool { - _, _, err := nat.ParsePortSpecs([]string{input}) - return err == nil -} - -func setupSchemaLoaders(version string) error { - if schema != nil { - return nil - } - - var schemaRaw interface{} - var schemaStr string = schemaV1 - if version == "v2" { - schemaStr = schemaV2 - } - err := json.Unmarshal([]byte(schemaStr), &schemaRaw) - if err != nil { - return err - } - - schema = schemaRaw.(map[string]interface{}) - - gojsonschema.FormatCheckers.Add("environment", environmentFormatChecker{}) - //gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{}) - //gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{}) - schemaLoader = gojsonschema.NewGoLoader(schemaRaw) - - definitions := schema["definitions"].(map[string]interface{}) - constraints := definitions["constraints"].(map[string]interface{}) - service := constraints["service"].(map[string]interface{}) - constraintSchemaLoader = gojsonschema.NewGoLoader(service) - - return nil -} - -// gojsonschema doesn't provide a list of valid types for a property -// This parses the schema manually to find all valid types -func parseValidTypesFromSchema(schema map[string]interface{}, context string) []string { - contextSplit := strings.Split(context, ".") - key := contextSplit[len(contextSplit)-1] - - definitions := schema["definitions"].(map[string]interface{}) - service := definitions["service"].(map[string]interface{}) - properties := service["properties"].(map[string]interface{}) - property := 
properties[key].(map[string]interface{}) - - var validTypes []string - - if val, ok := property["oneOf"]; ok { - validConditions := val.([]interface{}) - - for _, validCondition := range validConditions { - condition := validCondition.(map[string]interface{}) - validTypes = append(validTypes, condition["type"].(string)) - } - } else if val, ok := property["$ref"]; ok { - reference := val.(string) - if reference == "#/definitions/string_or_list" { - return []string{"string", "array"} - } else if reference == "#/definitions/list_of_strings" { - return []string{"array"} - } else if reference == "#/definitions/list_or_dict" { - return []string{"array", "object"} - } - } - - return validTypes -} diff --git a/vendor/github.com/hyperhq/libcompose/config/types.go b/vendor/github.com/hyperhq/libcompose/config/types.go deleted file mode 100644 index c8c2ff5b6..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/types.go +++ /dev/null @@ -1,238 +0,0 @@ -package config - -import ( - "sync" - - "github.com/hyperhq/libcompose/yaml" -) - -// EnvironmentLookup defines methods to provide environment variable loading. -type EnvironmentLookup interface { - Lookup(key, serviceName string, config *ServiceConfig) []string -} - -// ResourceLookup defines methods to provide file loading. -type ResourceLookup interface { - Lookup(file, relativeTo string) ([]byte, string, error) - ResolvePath(path, inFile string) string -} - -// ServiceConfigV1 holds version 1 of libcompose service configuration -type ServiceConfigV1 struct { - /* - Build string `yaml:"build,omitempty"` - CapAdd []string `yaml:"cap_add,omitempty"` - CapDrop []string `yaml:"cap_drop,omitempty"` - CgroupParent string `yaml:"cgroup_parent,omitempty"` - CPUQuota int64 `yaml:"cpu_quota,omitempty"` - CPUSet string `yaml:"cpuset,omitempty"` - CPUShares int64 `yaml:"cpu_shares,omitempty"` - Devices []string `yaml:"devices,omitempty"` - DNS yaml.Stringorslice `yaml:"dns,omitempty"` - DNSSearch yaml.Stringorslice `yaml:"dns_search,omitempty"` - Dockerfile string `yaml:"dockerfile,omitempty"` - LogDriver string `yaml:"log_driver,omitempty"` - MacAddress string `yaml:"mac_address,omitempty"` - MemLimit int64 `yaml:"mem_limit,omitempty"` - MemSwapLimit int64 `yaml:"memswap_limit,omitempty"` - Name string `yaml:"name,omitempty"` - Net string `yaml:"net,omitempty"` - Pid string `yaml:"pid,omitempty"` - Uts string `yaml:"uts,omitempty"` - Ipc string `yaml:"ipc,omitempty"` - Ports []string `yaml:"ports,omitempty"` - Privileged bool `yaml:"privileged,omitempty"` - ReadOnly bool `yaml:"read_only,omitempty"` - SecurityOpt []string `yaml:"security_opt,omitempty"` - User string `yaml:"user,omitempty"` - VolumeDriver string `yaml:"volume_driver,omitempty"` - VolumesFrom []string `yaml:"volumes_from,omitempty"` - Expose []string `yaml:"expose,omitempty"` - LogOpt map[string]string `yaml:"log_opt,omitempty"` - ExtraHosts []string `yaml:"extra_hosts,omitempty"` - Ulimits yaml.Ulimits `yaml:"ulimits,omitempty"` - */ - Command yaml.Command `yaml:"command,flow,omitempty" json:"command,omitempty"` - ContainerName string `yaml:"container_name,omitempty" json:"container_name,omitempty"` - DomainName string `yaml:"domainname,omitempty" json:"domainname,omitempty"` - Entrypoint yaml.Command `yaml:"entrypoint,flow,omitempty" json:"entrypoint,omitempty"` - EnvFile yaml.Stringorslice `yaml:"env_file,omitempty" json:"env_file,omitempty"` - Environment yaml.MaporEqualSlice `yaml:"environment,omitempty" json:"environment,omitempty"` - Hostname string `yaml:"hostname,omitempty"
json:"hostname,omitempty"` - Image string `yaml:"image,omitempty" json:"image,omitempty"` - Labels yaml.SliceorMap `yaml:"labels,omitempty" json:"labels,omitempty"` - Links yaml.MaporColonSlice `yaml:"links,omitempty" json:"links,omitempty"` - Restart string `yaml:"restart,omitempty" json:"restart,omitempty"` - StdinOpen bool `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` - Tty bool `yaml:"tty,omitempty" json:"tty,omitempty"` - Volumes []string `yaml:"volumes,omitempty" json:"volumes,omitempty"` - WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` - ExternalLinks []string `yaml:"external_links,omitempty" json:"external_links,omitempty"` - - Size string `yaml:"size,omitempty" json:"size,omitempty"` - Fip string `yaml:"fip,omitempty" json:"fip,omitempty"` - SecurityGroups []string `yaml:"security_groups,omitempty" json:"security_groups,omitempty"` - NoAutoVolume bool `yaml:"noauto_volume,omitempty" json:"noauto_volume,omitempty"` -} - -// Build holds v2 build information -type Build struct { - Context string `yaml:"context,omitempty"` - Dockerfile string `yaml:"dockerfile,omitempty"` - Args yaml.MaporEqualSlice `yaml:"args,omitempty"` -} - -// Log holds v2 logging information -type Log struct { - Driver string `yaml:"driver,omitempty"` - Options map[string]string `yaml:"options,omitempty"` -} - -// ServiceConfig holds version 2 of libcompose service configuration -type ServiceConfig struct { - /* - Build Build `yaml:"build,omitempty"` - CapAdd []string `yaml:"cap_add,omitempty"` - CapDrop []string `yaml:"cap_drop,omitempty"` - CPUSet string `yaml:"cpuset,omitempty"` - CPUShares int64 `yaml:"cpu_shares,omitempty"` - CPUQuota int64 `yaml:"cpu_quota,omitempty"` - CgroupParent string `yaml:"cgroup_parrent,omitempty"` - Devices []string `yaml:"devices,omitempty"` - DNS yaml.Stringorslice `yaml:"dns,omitempty"` - DNSSearch yaml.Stringorslice `yaml:"dns_search,omitempty"` - Expose []string `yaml:"expose,omitempty"` - Ipc string `yaml:"ipc,omitempty"` - Logging Log `yaml:"logging,omitempty"` - MacAddress string `yaml:"mac_address,omitempty"` - MemLimit int64 `yaml:"mem_limit,omitempty"` - MemSwapLimit int64 `yaml:"memswap_limit,omitempty"` - NetworkMode string `yaml:"network_mode,omitempty"` - Networks []string `yaml:"networks,omitempty"` - Pid string `yaml:"pid,omitempty"` - Ports []string `yaml:"ports,omitempty"` - Privileged bool `yaml:"privileged,omitempty"` - SecurityOpt []string `yaml:"security_opt,omitempty"` - StopSignal string `yaml:"stop_signal,omitempty"` - VolumeDriver string `yaml:"volume_driver,omitempty"` - VolumesFrom []string `yaml:"volumes_from,omitempty"` - Uts string `yaml:"uts,omitempty"` - ReadOnly bool `yaml:"read_only,omitempty"` - User string `yaml:"user,omitempty"` - Ulimits yaml.Ulimits `yaml:"ulimits,omitempty"` - */ - Expose []string `yaml:"expose,omitempty" json:"expose,omitempty"` - Ports []string `yaml:"ports,omitempty" json:"ports,omitempty"` - Command yaml.Command `yaml:"command,flow,omitempty" json:"command,omitempty"` - ContainerName string `yaml:"container_name,omitempty" json:"container_name,omitempty"` - DomainName string `yaml:"domainname,omitempty" json:"domainname,omitempty"` - DependsOn []string `yaml:"depends_on,omitempty" json:"depends_on,omitempty"` - Entrypoint yaml.Command `yaml:"entrypoint,flow,omitempty" json:"entrypoint,omitempty"` - EnvFile yaml.Stringorslice `yaml:"env_file,omitempty" json:"env_file,omitempty"` - Environment yaml.MaporEqualSlice `yaml:"environment,omitempty" json:"environment,omitempty"` - 
Extends yaml.MaporEqualSlice `yaml:"extends,omitempty" json:"extends,omitempty"` - ExternalLinks []string `yaml:"external_links,omitempty" json:"external_links"` - Image string `yaml:"image,omitempty" json:"image,omitempty"` - Hostname string `yaml:"hostname,omitempty" json:"hostname,omitempty"` - Labels yaml.SliceorMap `yaml:"labels,omitempty" json:"labels,omitempty"` - Links yaml.MaporColonSlice `yaml:"links,omitempty" json:"links,omitempty"` - Volumes []string `yaml:"volumes,omitempty" json:"volumes,omitempty"` - Restart string `yaml:"restart,omitempty" json:"restart,omitempty"` - StdinOpen bool `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` - Tty bool `yaml:"tty,omitempty" json:"tty,omitempty"` - WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` - - Size string `yaml:"size,omitempty" json:"size,omitempty"` - Fip string `yaml:"fip,omitempty" json:"fip,omitempty"` - SecurityGroups []string `yaml:"security_groups,omitempty" json:"security_groups,omitempty"` - NoAutoVolume bool `yaml:"noauto_volume,omitempty" json:"noauto_volume,omitempty"` -} - -// VolumeConfig holds v2 volume configuration -type VolumeConfig struct { - Driver string `yaml:"driver,omitempty"` - DriverOpts map[string]string `yaml:"driver_opts,omitempty"` - External bool `yaml:"external,omitempty"` -} - -// Ipam holds v2 network IPAM information -type Ipam struct { - Driver string `yaml:"driver,omitempty"` - Config []string `yaml:"config,omitempty"` -} - -// NetworkConfig holds v2 network configuration -type NetworkConfig struct { - Driver string `yaml:"driver,omitempty"` - DriverOpts map[string]string `yaml:"driver_opts,omitempty"` - External bool `yaml:"external,omitempty"` - Ipam Ipam `yaml:"ipam,omitempty"` -} - -// Config holds libcompose top level configuration -type Config struct { - Version string `yaml:"version,omitempty"` - Services RawServiceMap `yaml:"services,omitempty"` - Volumes map[string]*VolumeConfig `yaml:"volumes,omitempty"` - Networks map[string]*NetworkConfig `yaml:"networks,omitempty"` -} - -// NewServiceConfigs initializes a new ServiceConfigs struct -func NewServiceConfigs() *ServiceConfigs { - return &ServiceConfigs{ - M: make(map[string]*ServiceConfig), - } -} - -// ServiceConfigs holds a concurrency-safe map of ServiceConfig -type ServiceConfigs struct { - M map[string]*ServiceConfig - mu sync.RWMutex -} - -// Has checks if the config map has the specified name -func (c *ServiceConfigs) Has(name string) bool { - c.mu.RLock() - defer c.mu.RUnlock() - _, ok := c.M[name] - return ok -} - -// Get returns the config and the presence of the specified name -func (c *ServiceConfigs) Get(name string) (*ServiceConfig, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - service, ok := c.M[name] - return service, ok -} - -// Add adds the specified config with the specified name -func (c *ServiceConfigs) Add(name string, service *ServiceConfig) { - c.mu.Lock() - c.M[name] = service - c.mu.Unlock() -} - -// Len returns the number of configs -func (c *ServiceConfigs) Len() int { - c.mu.RLock() - defer c.mu.RUnlock() - return len(c.M) -} - -// Keys returns the names of the configs -func (c *ServiceConfigs) Keys() []string { - keys := []string{} - c.mu.RLock() - defer c.mu.RUnlock() - for name := range c.M { - keys = append(keys, name) - } - return keys -} - -// RawService represents a Service in unparsed map form -type RawService map[string]interface{} - -// RawServiceMap is a collection of RawServices -type RawServiceMap map[string]RawService diff --git
a/vendor/github.com/hyperhq/libcompose/config/utils.go b/vendor/github.com/hyperhq/libcompose/config/utils.go deleted file mode 100644 index ae9b86cf9..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/utils.go +++ /dev/null @@ -1,42 +0,0 @@ -package config - -func merge(existing, value interface{}) interface{} { - // append strings - if left, lok := existing.([]interface{}); lok { - if right, rok := value.([]interface{}); rok { - return append(left, right...) - } - } - - //merge maps - if left, lok := existing.(map[interface{}]interface{}); lok { - if right, rok := value.(map[interface{}]interface{}); rok { - newLeft := make(map[interface{}]interface{}) - for k, v := range left { - newLeft[k] = v - } - for k, v := range right { - newLeft[k] = v - } - return newLeft - } - } - - return value -} - -func clone(in RawService) RawService { - result := RawService{} - for k, v := range in { - result[k] = v - } - - return result -} - -func asString(obj interface{}) string { - if v, ok := obj.(string); ok { - return v - } - return "" -} diff --git a/vendor/github.com/hyperhq/libcompose/config/validation.go b/vendor/github.com/hyperhq/libcompose/config/validation.go deleted file mode 100644 index 970da0816..000000000 --- a/vendor/github.com/hyperhq/libcompose/config/validation.go +++ /dev/null @@ -1,309 +0,0 @@ -package config - -import ( - "fmt" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/xeipuuv/gojsonschema" -) - -func serviceNameFromErrorField(field string) string { - splitKeys := strings.Split(field, ".") - return splitKeys[0] -} - -func keyNameFromErrorField(field string) string { - splitKeys := strings.Split(field, ".") - - if len(splitKeys) > 0 { - return splitKeys[len(splitKeys)-1] - } - - return "" -} - -func containsTypeError(resultError gojsonschema.ResultError) bool { - contextSplit := strings.Split(resultError.Context().String(), ".") - _, err := strconv.Atoi(contextSplit[len(contextSplit)-1]) - return err == nil -} - -func addArticle(s string) string { - switch s[0] { - case 'a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U': - return "an " + s - default: - return "a " + s - } -} - -// Gets the value in a service map at a given error context -func getValue(val interface{}, context string) string { - keys := strings.Split(context, ".") - - if keys[0] == "(root)" { - keys = keys[1:] - } - - for i, k := range keys { - switch typedVal := (val).(type) { - case string: - return typedVal - case []interface{}: - if index, err := strconv.Atoi(k); err == nil { - val = typedVal[index] - } - case RawServiceMap: - val = typedVal[k] - case RawService: - val = typedVal[k] - case map[interface{}]interface{}: - val = typedVal[k] - } - - if i == len(keys)-1 { - return fmt.Sprint(val) - } - } - - return "" -} - -// Converts map[interface{}]interface{} to map[string]interface{} recursively -// gojsonschema only accepts map[string]interface{} -func convertServiceMapKeysToStrings(serviceMap RawServiceMap) RawServiceMap { - newServiceMap := make(RawServiceMap) - - for k, v := range serviceMap { - newServiceMap[k] = convertServiceKeysToStrings(v) - } - - return newServiceMap -} - -func convertServiceKeysToStrings(service RawService) RawService { - newService := make(RawService) - - for k, v := range service { - newService[k] = convertKeysToStrings(v) - } - - return newService -} - -func convertKeysToStrings(item interface{}) interface{} { - switch typedDatas := item.(type) { - - case map[interface{}]interface{}: - newMap := make(map[string]interface{}) - - for key, value := 
range typedDatas { - stringKey := key.(string) - newMap[stringKey] = convertKeysToStrings(value) - } - return newMap - - case []interface{}: - // newArray := make([]interface{}, 0) will cause golint to complain - var newArray []interface{} - newArray = make([]interface{}, 0) - - for _, value := range typedDatas { - newArray = append(newArray, convertKeysToStrings(value)) - } - return newArray - - default: - return item - } -} - -var dockerConfigHints = map[string]string{ - /* - "cpu_share": "cpu_shares", - "add_host": "extra_hosts", - "hosts": "extra_hosts", - "extra_host": "extra_hosts", - "device": "devices", - */ - "link": "links", - /* - "memory_swap": "memswap_limit", - "port": "ports", - "privilege": "privileged", - "priviliged": "privileged", - "privilige": "privileged", - */ - "volume": "volumes", - "workdir": "working_dir", -} - -func unsupportedConfigMessage(key string, nextErr gojsonschema.ResultError) string { - service := serviceNameFromErrorField(nextErr.Field()) - - message := fmt.Sprintf("Unsupported config option for %s service: '%s'", service, key) - if val, ok := dockerConfigHints[key]; ok { - message += fmt.Sprintf(" (did you mean '%s'?)", val) - } - - return message -} - -func oneOfMessage(serviceMap RawServiceMap, schema map[string]interface{}, err, nextErr gojsonschema.ResultError) string { - switch nextErr.Type() { - case "additional_property_not_allowed": - property := nextErr.Details()["property"] - - return fmt.Sprintf("contains unsupported option: '%s'", property) - case "invalid_type": - if containsTypeError(nextErr) { - expectedType := addArticle(nextErr.Details()["expected"].(string)) - - return fmt.Sprintf("contains %s, which is an invalid type, it should be %s", getValue(serviceMap, nextErr.Context().String()), expectedType) - } - - validTypes := parseValidTypesFromSchema(schema, err.Context().String()) - - validTypesMsg := addArticle(strings.Join(validTypes, " or ")) - - return fmt.Sprintf("contains an invalid type, it should be %s", validTypesMsg) - case "unique": - contextWithDuplicates := getValue(serviceMap, nextErr.Context().String()) - - return fmt.Sprintf("contains non unique items, please remove duplicates from %s", contextWithDuplicates) - } - - return "" -} - -func invalidTypeMessage(service, key string, err gojsonschema.ResultError) string { - expectedTypesString := err.Details()["expected"].(string) - var expectedTypes []string - - if strings.Contains(expectedTypesString, ",") { - expectedTypes = strings.Split(expectedTypesString[1:len(expectedTypesString)-1], ",") - } else { - expectedTypes = []string{expectedTypesString} - } - - validTypesMsg := addArticle(strings.Join(expectedTypes, " or ")) - - return fmt.Sprintf("Service '%s' configuration key '%s' contains an invalid type, it should be %s.", service, key, validTypesMsg) -} - -func validate(serviceMap RawServiceMap, version string) error { - if err := setupSchemaLoaders(version); err != nil { - return err - } - - serviceMap = convertServiceMapKeysToStrings(serviceMap) - - var validationErrors []string - - dataLoader := gojsonschema.NewGoLoader(serviceMap) - - result, err := gojsonschema.Validate(schemaLoader, dataLoader) - if err != nil { - return err - } - - // gojsonschema can create extraneous "additional_property_not_allowed" errors in some cases - // If this is set, and the error is at root level, skip over that error - skipRootAdditionalPropertyError := false - - if !result.Valid() { - for i := 0; i < len(result.Errors()); i++ { - err := result.Errors()[i] - - if 
skipRootAdditionalPropertyError && err.Type() == "additional_property_not_allowed" && err.Context().String() == "(root)" { - skipRootAdditionalPropertyError = false - continue - } - - if err.Context().String() == "(root)" { - switch err.Type() { - case "additional_property_not_allowed": - validationErrors = append(validationErrors, fmt.Sprintf("Invalid service name '%s' - only [a-zA-Z0-9\\._\\-] characters are allowed", err.Field())) - default: - validationErrors = append(validationErrors, err.Description()) - } - - } else { - skipRootAdditionalPropertyError = true - - serviceName := serviceNameFromErrorField(err.Field()) - key := keyNameFromErrorField(err.Field()) - - switch err.Type() { - case "additional_property_not_allowed": - logrus.Infof("%s %s", serviceName, key) - validationErrors = append(validationErrors, unsupportedConfigMessage(key, result.Errors()[i+1])) - case "number_one_of": - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key '%s' %s", serviceName, key, oneOfMessage(serviceMap, schema, err, result.Errors()[i+1]))) - - // Next error handled in oneOfMessage, skip over it - i++ - case "invalid_type": - validationErrors = append(validationErrors, invalidTypeMessage(serviceName, key, err)) - case "required": - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' option '%s' is invalid, %s", serviceName, key, err.Description())) - case "missing_dependency": - dependency := err.Details()["dependency"].(string) - validationErrors = append(validationErrors, fmt.Sprintf("Invalid configuration for '%s' service: dependency '%s' is not satisfied", serviceName, dependency)) - case "unique": - contextWithDuplicates := getValue(serviceMap, err.Context().String()) - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key '%s' value %s has non-unique elements", serviceName, key, contextWithDuplicates)) - default: - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' configuration key %s value %s", serviceName, key, err.Description())) - } - } - } - - if len(validationErrors) > 0 { - return fmt.Errorf(strings.Join(validationErrors, "\n")) - } - } - - return nil -} - -func validateServiceConstraints(service RawService, serviceName string) error { - if err := setupSchemaLoaders("v1"); err != nil { - return err - } - - service = convertServiceKeysToStrings(service) - - var validationErrors []string - - dataLoader := gojsonschema.NewGoLoader(service) - - result, err := gojsonschema.Validate(constraintSchemaLoader, dataLoader) - if err != nil { - return err - } - - if !result.Valid() { - for _, err := range result.Errors() { - if err.Type() == "number_any_of" { - _, containsImage := service["image"] - _, containsBuild := service["build"] - _, containsDockerfile := service["dockerfile"] - - if containsImage && containsBuild { - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has both an image and build path specified. A service can either be built to image or use an existing image, not both.", serviceName)) - } else if !containsImage && !containsBuild { - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has neither an image nor a build path specified. Exactly one must be provided.", serviceName)) - } else if containsImage && containsDockerfile { - validationErrors = append(validationErrors, fmt.Sprintf("Service '%s' has both an image and alternate Dockerfile. 
A service can either be built to image or use an existing image, not both.", serviceName)) - } - } - } - - return fmt.Errorf(strings.Join(validationErrors, "\n")) - } - - return nil -} diff --git a/vendor/github.com/hyperhq/libcompose/utils/util.go b/vendor/github.com/hyperhq/libcompose/utils/util.go deleted file mode 100644 index 868eb92a8..000000000 --- a/vendor/github.com/hyperhq/libcompose/utils/util.go +++ /dev/null @@ -1,136 +0,0 @@ -package utils - -import ( - "encoding/json" - "sync" - - "github.com/Sirupsen/logrus" - yaml "github.com/cloudfoundry-incubator/candiedyaml" -) - -// InParallel holds a pool and a waitgroup to execute tasks in parallel and to be able -// to wait for completion of all tasks. -type InParallel struct { - wg sync.WaitGroup - pool sync.Pool -} - -// Add runs the specified task in parallel and adds it to the waitGroup. -func (i *InParallel) Add(task func() error) { - i.wg.Add(1) - - go func() { - defer i.wg.Done() - err := task() - if err != nil { - i.pool.Put(err) - } - }() -} - -// Wait waits for all tasks to complete and returns the latest error encountered if any. -func (i *InParallel) Wait() error { - i.wg.Wait() - obj := i.pool.Get() - if err, ok := obj.(error); ok { - return err - } - return nil -} - -// ConvertByJSON converts a struct (src) to another one (target) using json marshalling/unmarshalling. -// If the structures are not compatible, this will return an error as the unmarshalling will fail. -func ConvertByJSON(src, target interface{}) error { - newBytes, err := json.Marshal(src) - if err != nil { - return err - } - - err = json.Unmarshal(newBytes, target) - if err != nil { - logrus.Errorf("Failed to unmarshal: %v\n%s", err, string(newBytes)) - } - return err -} - -// Convert converts a struct (src) to another one (target) using yaml marshalling/unmarshalling. -// If the structures are not compatible, this will return an error as the unmarshalling will fail. -func Convert(src, target interface{}) error { - newBytes, err := yaml.Marshal(src) - if err != nil { - return err - } - - err = yaml.Unmarshal(newBytes, target) - if err != nil { - logrus.Errorf("Failed to unmarshal: %v\n%s", err, string(newBytes)) - } - return err -} - -// CopySlice creates an exact copy of the provided string slice -func CopySlice(s []string) []string { - if s == nil { - return nil - } - r := make([]string, len(s)) - copy(r, s) - return r -} - -// CopyMap creates an exact copy of the provided string-to-string map -func CopyMap(m map[string]string) map[string]string { - if m == nil { - return nil - } - r := map[string]string{} - for k, v := range m { - r[k] = v - } - return r -} - -// FilterStringSet accepts a string set `s` (in the form of `map[string]bool`) and a filtering function `f` -// and returns a string set containing only the strings `x` for which `f(x) == true` -func FilterStringSet(s map[string]bool, f func(x string) bool) map[string]bool { - result := map[string]bool{} - for k := range s { - if f(k) { - result[k] = true - } - } - return result -} - -// FilterString returns a json representation of the specified map -// that is used as filter for docker. -func FilterString(data map[string][]string) string { - // I can't imagine this would ever fail - bytes, _ := json.Marshal(data) - return string(bytes) -}
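Convert and ConvertByJSON above are the glue this vendored tree uses to coerce RawService maps into typed configs. A minimal standalone sketch of the same marshal/unmarshal round-trip technique, using only encoding/json and invented types (not this package's API):

package main

import (
	"encoding/json"
	"fmt"
)

// convertByJSON mirrors utils.ConvertByJSON: marshal src, then unmarshal
// into target; incompatible shapes surface as an unmarshal error.
func convertByJSON(src, target interface{}) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, target)
}

// service is a hypothetical stand-in for a typed config struct.
type service struct {
	Image string   `json:"image"`
	Ports []string `json:"ports"`
}

func main() {
	raw := map[string]interface{}{
		"image": "nginx",
		"ports": []interface{}{"80:80"},
	}
	var s service
	if err := convertByJSON(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s) // {Image:nginx Ports:[80:80]}
}

-// Contains checks if the specified string (key) is present in the specified collection.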
-func Contains(collection []string, key string) bool { - for _, value := range collection { - if value == key { - return true - } - } - - return false -} - -// Merge performs a union of two string slices: the result is an unordered slice -// that includes every item from either argument exactly once -func Merge(coll1, coll2 []string) []string { - m := map[string]struct{}{} - for _, v := range append(coll1, coll2...) { - m[v] = struct{}{} - } - r := make([]string, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} diff --git a/vendor/github.com/hyperhq/libcompose/yaml/types_yaml.go b/vendor/github.com/hyperhq/libcompose/yaml/types_yaml.go deleted file mode 100644 index c1fdc1034..000000000 --- a/vendor/github.com/hyperhq/libcompose/yaml/types_yaml.go +++ /dev/null @@ -1,288 +0,0 @@ -package yaml - -import ( - "fmt" - "reflect" - "sort" - "strconv" - "strings" - - "github.com/docker/engine-api/types/strslice" - "github.com/flynn/go-shlex" -) - -// Stringorslice represents a string or an array of strings. -// Using engine-api Strslice and augment it with YAML marshalling stuff. -type Stringorslice strslice.StrSlice - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *Stringorslice) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - *s = parts - case string: - *s = []string{value} - default: - return fmt.Errorf("Failed to unmarshal Stringorslice: %#v", value) - } - return nil -} - -// Ulimits represents a list of Ulimit. -// It is, however, represented in yaml as keys (and thus map in Go) -type Ulimits struct { - Elements []Ulimit -} - -// MarshalYAML implements the Marshaller interface. -func (u Ulimits) MarshalYAML() (tag string, value interface{}, err error) { - ulimitMap := make(map[string]Ulimit) - for _, ulimit := range u.Elements { - ulimitMap[ulimit.Name] = ulimit - } - return "", ulimitMap, nil -} - -// UnmarshalYAML implements the Unmarshaller interface. -func (u *Ulimits) UnmarshalYAML(tag string, value interface{}) error { - ulimits := make(map[string]Ulimit) - yamlUlimits := reflect.ValueOf(value) - switch yamlUlimits.Kind() { - case reflect.Map: - for _, key := range yamlUlimits.MapKeys() { - var name string - var soft, hard int64 - mapValue := yamlUlimits.MapIndex(key).Elem() - name = key.Elem().String() - switch mapValue.Kind() { - case reflect.Int64: - soft = mapValue.Int() - hard = mapValue.Int() - case reflect.Map: - if len(mapValue.MapKeys()) != 2 { - return fmt.Errorf("Failed to unmarshal Ulimit: %#v", mapValue) - } - for _, subKey := range mapValue.MapKeys() { - subValue := mapValue.MapIndex(subKey).Elem() - switch subKey.Elem().String() { - case "soft": - soft = subValue.Int() - case "hard": - hard = subValue.Int() - } - } - default: - return fmt.Errorf("Failed to unmarshal Ulimit: %#v, %v", mapValue, mapValue.Kind()) - } - ulimits[name] = Ulimit{ - Name: name, - ulimitValues: ulimitValues{ - Soft: soft, - Hard: hard, - }, - } - } - keys := make([]string, 0, len(ulimits)) - for key := range ulimits { - keys = append(keys, key) - } - sort.Strings(keys) - for _, key := range keys { - u.Elements = append(u.Elements, ulimits[key]) - } - default: - return fmt.Errorf("Failed to unmarshal Ulimit: %#v", value) - } - return nil -} - -// Ulimit represents ulimit information. 
-type Ulimit struct { - ulimitValues - Name string -} - -type ulimitValues struct { - Soft int64 `yaml:"soft"` - Hard int64 `yaml:"hard"` -} - -// MarshalYAML implements the Marshaller interface. -func (u Ulimit) MarshalYAML() (tag string, value interface{}, err error) { - if u.Soft == u.Hard { - return "", u.Soft, nil - } - return "", u.ulimitValues, err -} - -// NewUlimit creates a Ulimit based on the specified parts. -func NewUlimit(name string, soft int64, hard int64) Ulimit { - return Ulimit{ - Name: name, - ulimitValues: ulimitValues{ - Soft: soft, - Hard: hard, - }, - } -} - -// Command represents a docker command, can be a string or an array of strings. -type Command strslice.StrSlice - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *Command) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case []interface{}: - parts, err := toStrings(value) - if err != nil { - return err - } - *s = parts - case string: - parts, err := shlex.Split(value) - if err != nil { - return err - } - *s = parts - default: - return fmt.Errorf("Failed to unmarshal Command: %#v", value) - } - return nil -} - -// SliceorMap represents a slice or a map of strings. -type SliceorMap map[string]string - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *SliceorMap) UnmarshalYAML(tag string, value interface{}) error { - switch value := value.(type) { - case map[interface{}]interface{}: - parts := map[string]string{} - for k, v := range value { - if sk, ok := k.(string); ok { - if sv, ok := v.(string); ok { - parts[sk] = sv - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) - } - } - *s = parts - case []interface{}: - parts := map[string]string{} - for _, s := range value { - if str, ok := s.(string); ok { - str := strings.TrimSpace(str) - keyValueSlice := strings.SplitN(str, "=", 2) - - key := keyValueSlice[0] - val := "" - if len(keyValueSlice) == 2 { - val = keyValueSlice[1] - } - parts[key] = val - } else { - return fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", s, s) - } - } - *s = parts - default: - return fmt.Errorf("Failed to unmarshal SliceorMap: %#v", value) - } - return nil -} - -// MaporEqualSlice represents a slice of strings that gets unmarshalled from a -// YAML map into 'key=value' strings. -type MaporEqualSlice []string - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *MaporEqualSlice) UnmarshalYAML(tag string, value interface{}) error { - parts, err := unmarshalToStringOrSepMapParts(value, "=") - if err != nil { - return err - } - *s = parts - return nil -} - -// MaporColonSlice represents a slice of strings that gets unmarshalled from a -// YAML map into 'key:value' strings. -type MaporColonSlice []string - -// UnmarshalYAML implements the Unmarshaller interface. -func (s *MaporColonSlice) UnmarshalYAML(tag string, value interface{}) error { - parts, err := unmarshalToStringOrSepMapParts(value, ":") - if err != nil { - return err - } - *s = parts - return nil -} - -// MaporSpaceSlice represents a slice of strings that gets unmarshalled from a -// YAML map into 'key value' strings. -type MaporSpaceSlice []string
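The three Mapor*Slice types differ only in the separator handed to unmarshalToStringOrSepMapParts, defined later in this file. A standalone sketch of that map-to-slice technique, under invented names:

package main

import (
	"fmt"
	"sort"
	"strconv"
)

// toSepParts sketches the idea behind toSepMapParts: a YAML map becomes a
// flat []string by joining each key and value with the type-specific
// separator ("=", ":" or " ").
func toSepParts(m map[string]interface{}, sep string) ([]string, error) {
	parts := make([]string, 0, len(m))
	for k, v := range m {
		switch val := v.(type) {
		case string:
			parts = append(parts, k+sep+val)
		case int64:
			parts = append(parts, k+sep+strconv.FormatInt(val, 10))
		default:
			return nil, fmt.Errorf("cannot convert '%v' of type %T to a string", v, v)
		}
	}
	sort.Strings(parts) // map iteration order is random; sort for stable output
	return parts, nil
}

func main() {
	parts, err := toSepParts(map[string]interface{}{"FOO": "bar", "PORT": int64(80)}, "=")
	if err != nil {
		panic(err)
	}
	fmt.Println(parts) // [FOO=bar PORT=80]
}

-// UnmarshalYAML implements the Unmarshaller interface.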
-func (s *MaporSpaceSlice) UnmarshalYAML(tag string, value interface{}) error { - parts, err := unmarshalToStringOrSepMapParts(value, " ") - if err != nil { - return err - } - *s = parts - return nil -} - -func unmarshalToStringOrSepMapParts(value interface{}, key string) ([]string, error) { - switch value := value.(type) { - case []interface{}: - return toStrings(value) - case map[interface{}]interface{}: - return toSepMapParts(value, key) - default: - return nil, fmt.Errorf("Failed to unmarshal Map or Slice: %#v", value) - } -} - -func toSepMapParts(value map[interface{}]interface{}, sep string) ([]string, error) { - if len(value) == 0 { - return nil, nil - } - parts := make([]string, 0, len(value)) - for k, v := range value { - if sk, ok := k.(string); ok { - if sv, ok := v.(string); ok { - parts = append(parts, sk+sep+sv) - } else if sv, ok := v.(int64); ok { - parts = append(parts, sk+sep+strconv.FormatInt(sv, 10)) - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", k, k) - } - } - return parts, nil -} - -func toStrings(s []interface{}) ([]string, error) { - if len(s) == 0 { - return nil, nil - } - r := make([]string, len(s)) - for k, v := range s { - if sv, ok := v.(string); ok { - r[k] = sv - } else { - return nil, fmt.Errorf("Cannot unmarshal '%v' of type %T into a string value", v, v) - } - } - return r, nil -} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42..000000000 --- a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go deleted file mode 100644 index 7faf5d7f9..000000000 --- a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
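For orientation, the pointer.go file removed below implemented RFC 6901 JSON Pointers over documents decoded into interface{} values. A minimal usage sketch against the API it defined (the document literal and values are hypothetical; assumes a surrounding func with fmt and github.com/xeipuuv/gojsonpointer imported):

	doc := map[string]interface{}{
		"containers": []interface{}{
			map[string]interface{}{"image": "nginx"},
		},
	}
	// "/containers/0/image" walks the map key, then the array index, then the leaf key.
	p, _ := gojsonpointer.NewJsonPointer("/containers/0/image")
	v, kind, err := p.Get(doc)
	fmt.Println(v, kind, err) // nginx string <nil>
	_, _ = p.Set(doc, "busybox") // Set mutates doc in place and returns it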
-
-// author  			xeipuuv
-// author-github 	https://github.com/xeipuuv
-// author-mail		xeipuuv@gmail.com
-//
-// repository-name	gojsonpointer
-// repository-desc	An implementation of JSON Pointer - Go language
-//
-// description		Main and unique file.
-//
-// created      	25-02-2013
-
-package gojsonpointer
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-const (
-	const_empty_pointer     = ``
-	const_pointer_separator = `/`
-
-	const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"`
-)
-
-type implStruct struct {
-	mode string // "GET", "SET" or "DEL"
-
-	inDocument interface{}
-
-	setInValue interface{}
-
-	getOutNode interface{}
-	getOutKind reflect.Kind
-	outError   error
-}
-
-type JsonPointer struct {
-	referenceTokens []string
-}
-
-// NewJsonPointer parses the given string JSON pointer and returns an object
-func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) {
-
-	// Pointer to the root of the document
-	if len(jsonPointerString) == 0 {
-		// Keep referenceTokens nil
-		return
-	}
-	if jsonPointerString[0] != '/' {
-		return p, errors.New(const_invalid_start)
-	}
-
-	p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator)
-	return
-}
-
-// Get uses the pointer to retrieve a value from a JSON document
-func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
-
-	is := &implStruct{mode: "GET", inDocument: document}
-	p.implementation(is)
-	return is.getOutNode, is.getOutKind, is.outError
-
-}
-
-// Set uses the pointer to update a value in a JSON document
-func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {
-
-	is := &implStruct{mode: "SET", inDocument: document, setInValue: value}
-	p.implementation(is)
-	return document, is.outError
-
-}
-
-// Delete uses the pointer to delete a value from a JSON document
-func (p *JsonPointer) Delete(document interface{}) (interface{}, error) {
-	is := &implStruct{mode: "DEL", inDocument: document}
-	p.implementation(is)
-	return document, is.outError
-}
-
-// Get, Set and Delete share the same implementation to avoid code duplication
-func (p *JsonPointer) implementation(i *implStruct) {
-
-	kind := reflect.Invalid
-
-	// Full document when empty
-	if len(p.referenceTokens) == 0 {
-		i.getOutNode = i.inDocument
-		i.getOutKind = kind
-		i.outError = nil
-		return
-	}
-
-	node := i.inDocument
-
-	previousNodes := make([]interface{}, len(p.referenceTokens))
-	previousTokens := make([]string, len(p.referenceTokens))
-
-	for ti, token := range p.referenceTokens {
-
-		isLastToken := ti == len(p.referenceTokens)-1
-		previousNodes[ti] = node
-		previousTokens[ti] = token
-
-		switch v := node.(type) {
-
-		case map[string]interface{}:
-			decodedToken := decodeReferenceToken(token)
-			if _, ok := v[decodedToken]; ok {
-				node = v[decodedToken]
-				if isLastToken && i.mode == "SET" {
-					v[decodedToken] = i.setInValue
-				} else if isLastToken && i.mode == "DEL" {
-					delete(v, decodedToken)
-				}
-			} else if isLastToken && i.mode == "SET" {
-				v[decodedToken] = i.setInValue
-			} else {
-				i.outError = fmt.Errorf("Object has no key '%s'", decodedToken)
-				i.getOutKind = reflect.Map
-				i.getOutNode = nil
-				return
-			}
-
-		case []interface{}:
-			tokenIndex, err := strconv.Atoi(token)
-			if err != nil {
-				i.outError = fmt.Errorf("Invalid array index '%s'", token)
-				i.getOutKind = reflect.Slice
-				i.getOutNode = nil
-				return
-			}
-			if tokenIndex < 0 || tokenIndex >= len(v) {
-				i.outError = fmt.Errorf("Out of bounds array[0,%d] index '%d'", len(v), tokenIndex)
-				i.getOutKind = reflect.Slice
-				i.getOutNode = nil
-				return
-			}
-
-			node = v[tokenIndex]
-			if isLastToken && i.mode == "SET" {
-				v[tokenIndex] = i.setInValue
-			} else if isLastToken && i.mode == "DEL" {
-				// Overwrite the deleted element with the last one, truncate the
-				// slice, and write it back to the parent node, which is assumed
-				// to be a map (element order is not preserved).
-				v[tokenIndex] = v[len(v)-1]
-				v[len(v)-1] = nil
-				v = v[:len(v)-1]
-				previousNodes[ti-1].(map[string]interface{})[previousTokens[ti-1]] = v
-			}
-
-		default:
-			i.outError = fmt.Errorf("Invalid token reference '%s'", token)
-			i.getOutKind = reflect.ValueOf(node).Kind()
-			i.getOutNode = nil
-			return
-		}
-
-	}
-
-	i.getOutNode = node
-	i.getOutKind = reflect.ValueOf(node).Kind()
-	i.outError = nil
-}
-
-// String returns the pointer's string representation
-func (p *JsonPointer) String() string {
-
-	if len(p.referenceTokens) == 0 {
-		return const_empty_pointer
-	}
-
-	pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)
-
-	return pointerString
-}
-
-// Specific JSON pointer encoding here
-// ~0 => ~
-// ~1 => /
-// ... and vice versa
-
-func decodeReferenceToken(token string) string {
-	step1 := strings.Replace(token, `~1`, `/`, -1)
-	step2 := strings.Replace(step1, `~0`, `~`, -1)
-	return step2
-}
-
-func encodeReferenceToken(token string) string {
-	step1 := strings.Replace(token, `~`, `~0`, -1)
-	step2 := strings.Replace(step1, `/`, `~1`, -1)
-	return step2
-}
diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
deleted file mode 100644
index 55ede8a42..000000000
--- a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go deleted file mode 100644 index 645729130..000000000 --- a/vendor/github.com/xeipuuv/gojsonreference/reference.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
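Similarly, the reference.go file below paired a parsed URL with a gojsonpointer fragment, and its Inherits method resolved a child reference against a parent. A short illustrative sketch (the URLs are made up; assumes fmt and github.com/xeipuuv/gojsonreference imported):

	parent, _ := gojsonreference.NewJsonReference("http://example.com/schemas/root.json")
	child, _ := gojsonreference.NewJsonReference("#/definitions/port")
	// Inherits resolves child against parent, keeping the child's fragment.
	resolved, err := parent.Inherits(child)
	if err == nil {
		fmt.Println(resolved.String()) // http://example.com/schemas/root.json#/definitions/port
	}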
-
-// author  			xeipuuv
-// author-github 	https://github.com/xeipuuv
-// author-mail		xeipuuv@gmail.com
-//
-// repository-name	gojsonreference
-// repository-desc	An implementation of JSON Reference - Go language
-//
-// description		Main and unique file.
-//
-// created      	26-02-2013
-
-package gojsonreference
-
-import (
-	"errors"
-	"net/url"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/xeipuuv/gojsonpointer"
-)
-
-const (
-	const_fragment_char = `#`
-)
-
-// NewJsonReference parses the given JSON reference string and returns a JsonReference
func NewJsonReference(jsonReferenceString string) (JsonReference, error) {
-
-	var r JsonReference
-	err := r.parse(jsonReferenceString)
-	return r, err
-
-}
-
-type JsonReference struct {
-	referenceUrl     *url.URL
-	referencePointer gojsonpointer.JsonPointer
-
-	HasFullUrl      bool
-	HasUrlPathOnly  bool
-	HasFragmentOnly bool
-	HasFileScheme   bool
-	HasFullFilePath bool
-}
-
-func (r *JsonReference) GetUrl() *url.URL {
-	return r.referenceUrl
-}
-
-func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer {
-	return &r.referencePointer
-}
-
-func (r *JsonReference) String() string {
-
-	if r.referenceUrl != nil {
-		return r.referenceUrl.String()
-	}
-
-	if r.HasFragmentOnly {
-		return const_fragment_char + r.referencePointer.String()
-	}
-
-	return r.referencePointer.String()
-}
-
-func (r *JsonReference) IsCanonical() bool {
-	return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl)
-}
-
-// parse acts as the constructor: it parses the given JSON reference string
-func (r *JsonReference) parse(jsonReferenceString string) (err error) {
-
-	r.referenceUrl, err = url.Parse(jsonReferenceString)
-	if err != nil {
-		return
-	}
-	refUrl := r.referenceUrl
-
-	if refUrl.Scheme != "" && refUrl.Host != "" {
-		r.HasFullUrl = true
-	} else {
-		if refUrl.Path != "" {
-			r.HasUrlPathOnly = true
-		} else if refUrl.RawQuery == "" && refUrl.Fragment != "" {
-			r.HasFragmentOnly = true
-		}
-	}
-
-	r.HasFileScheme = refUrl.Scheme == "file"
-	if runtime.GOOS == "windows" {
-		// on Windows, a file URL may have an extra leading slash, and if it
-		// doesn't then its first component will be treated as the host by the
-		// Go runtime
-		if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") {
-			r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:])
-		} else {
-			r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path)
-		}
-	} else {
-		r.HasFullFilePath = filepath.IsAbs(refUrl.Path)
-	}
-
-	// an invalid json-pointer error means the url has no json-pointer fragment; simply ignore the error
-	r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment)
-
-	return
-}
-
-// Inherits creates a new reference from a parent and a child.
-// If the child cannot inherit from the parent, an error is returned.
-func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) {
-	if child.GetUrl() == nil {
-		return nil, errors.New("childUrl is nil")
-	}
-
-	if r.GetUrl() == nil {
-		return nil, errors.New("parentUrl is nil")
-	}
-
-	// Get a copy of the parent url to make sure we do not modify the original.
-	// URL reference resolving fails if the fragment of the child is empty, but the parent's is not.
-	// The fragment of the child must be used, so the fragment of the parent is manually removed.
- parentUrl := *r.GetUrl() - parentUrl.Fragment = "" - - ref, err := NewJsonReference(parentUrl.ResolveReference(child.GetUrl()).String()) - if err != nil { - return nil, err - } - return &ref, err -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt deleted file mode 100644 index 55ede8a42..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015 xeipuuv - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go deleted file mode 100644 index d39f01959..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/errors.go +++ /dev/null @@ -1,283 +0,0 @@ -package gojsonschema - -import ( - "bytes" - "sync" - "text/template" -) - -var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}} - -// template.Template is not thread-safe for writing, so some locking is done -// sync.RWMutex is used for efficiently locking when new templates are created -type errorTemplate struct { - *template.Template - sync.RWMutex -} - -type ( - // RequiredError. ErrorDetails: property string - RequiredError struct { - ResultErrorFields - } - - // InvalidTypeError. ErrorDetails: expected, given - InvalidTypeError struct { - ResultErrorFields - } - - // NumberAnyOfError. ErrorDetails: - - NumberAnyOfError struct { - ResultErrorFields - } - - // NumberOneOfError. ErrorDetails: - - NumberOneOfError struct { - ResultErrorFields - } - - // NumberAllOfError. ErrorDetails: - - NumberAllOfError struct { - ResultErrorFields - } - - // NumberNotError. ErrorDetails: - - NumberNotError struct { - ResultErrorFields - } - - // MissingDependencyError. ErrorDetails: dependency - MissingDependencyError struct { - ResultErrorFields - } - - // InternalError. ErrorDetails: error - InternalError struct { - ResultErrorFields - } - - // EnumError. ErrorDetails: allowed - EnumError struct { - ResultErrorFields - } - - // ArrayNoAdditionalItemsError. ErrorDetails: - - ArrayNoAdditionalItemsError struct { - ResultErrorFields - } - - // ArrayMinItemsError. ErrorDetails: min - ArrayMinItemsError struct { - ResultErrorFields - } - - // ArrayMaxItemsError. ErrorDetails: max - ArrayMaxItemsError struct { - ResultErrorFields - } - - // ItemsMustBeUniqueError. ErrorDetails: type - ItemsMustBeUniqueError struct { - ResultErrorFields - } - - // ArrayMinPropertiesError. ErrorDetails: min - ArrayMinPropertiesError struct { - ResultErrorFields - } - - // ArrayMaxPropertiesError. ErrorDetails: max - ArrayMaxPropertiesError struct { - ResultErrorFields - } - - // AdditionalPropertyNotAllowedError. ErrorDetails: property - AdditionalPropertyNotAllowedError struct { - ResultErrorFields - } - - // InvalidPropertyPatternError. ErrorDetails: property, pattern - InvalidPropertyPatternError struct { - ResultErrorFields - } - - // StringLengthGTEError. ErrorDetails: min - StringLengthGTEError struct { - ResultErrorFields - } - - // StringLengthLTEError. ErrorDetails: max - StringLengthLTEError struct { - ResultErrorFields - } - - // DoesNotMatchPatternError. ErrorDetails: pattern - DoesNotMatchPatternError struct { - ResultErrorFields - } - - // DoesNotMatchFormatError. 
ErrorDetails: format - DoesNotMatchFormatError struct { - ResultErrorFields - } - - // MultipleOfError. ErrorDetails: multiple - MultipleOfError struct { - ResultErrorFields - } - - // NumberGTEError. ErrorDetails: min - NumberGTEError struct { - ResultErrorFields - } - - // NumberGTError. ErrorDetails: min - NumberGTError struct { - ResultErrorFields - } - - // NumberLTEError. ErrorDetails: max - NumberLTEError struct { - ResultErrorFields - } - - // NumberLTError. ErrorDetails: max - NumberLTError struct { - ResultErrorFields - } -) - -// newError takes a ResultError type and sets the type, context, description, details, value, and field -func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { - var t string - var d string - switch err.(type) { - case *RequiredError: - t = "required" - d = locale.Required() - case *InvalidTypeError: - t = "invalid_type" - d = locale.InvalidType() - case *NumberAnyOfError: - t = "number_any_of" - d = locale.NumberAnyOf() - case *NumberOneOfError: - t = "number_one_of" - d = locale.NumberOneOf() - case *NumberAllOfError: - t = "number_all_of" - d = locale.NumberAllOf() - case *NumberNotError: - t = "number_not" - d = locale.NumberNot() - case *MissingDependencyError: - t = "missing_dependency" - d = locale.MissingDependency() - case *InternalError: - t = "internal" - d = locale.Internal() - case *EnumError: - t = "enum" - d = locale.Enum() - case *ArrayNoAdditionalItemsError: - t = "array_no_additional_items" - d = locale.ArrayNoAdditionalItems() - case *ArrayMinItemsError: - t = "array_min_items" - d = locale.ArrayMinItems() - case *ArrayMaxItemsError: - t = "array_max_items" - d = locale.ArrayMaxItems() - case *ItemsMustBeUniqueError: - t = "unique" - d = locale.Unique() - case *ArrayMinPropertiesError: - t = "array_min_properties" - d = locale.ArrayMinProperties() - case *ArrayMaxPropertiesError: - t = "array_max_properties" - d = locale.ArrayMaxProperties() - case *AdditionalPropertyNotAllowedError: - t = "additional_property_not_allowed" - d = locale.AdditionalPropertyNotAllowed() - case *InvalidPropertyPatternError: - t = "invalid_property_pattern" - d = locale.InvalidPropertyPattern() - case *StringLengthGTEError: - t = "string_gte" - d = locale.StringGTE() - case *StringLengthLTEError: - t = "string_lte" - d = locale.StringLTE() - case *DoesNotMatchPatternError: - t = "pattern" - d = locale.DoesNotMatchPattern() - case *DoesNotMatchFormatError: - t = "format" - d = locale.DoesNotMatchFormat() - case *MultipleOfError: - t = "multiple_of" - d = locale.MultipleOf() - case *NumberGTEError: - t = "number_gte" - d = locale.NumberGTE() - case *NumberGTError: - t = "number_gt" - d = locale.NumberGT() - case *NumberLTEError: - t = "number_lte" - d = locale.NumberLTE() - case *NumberLTError: - t = "number_lt" - d = locale.NumberLT() - } - - err.SetType(t) - err.SetContext(context) - err.SetValue(value) - err.SetDetails(details) - details["field"] = err.Field() - - if _, exists := details["context"]; !exists && context != nil { - details["context"] = context.String() - } - - err.SetDescription(formatErrorDescription(d, details)) -} - -// formatErrorDescription takes a string in the default text/template -// format and converts it to a string with replacements. The fields come -// from the ErrorDetails struct and vary for each type of error. 
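To make that substitution concrete, a hedged sketch of a call from inside the package (the template string mirrors the locale defaults but is illustrative here, and ErrorDetails is the package's map[string]interface{} detail type):

	details := ErrorDetails{"min": 5, "field": "name"}
	msg := formatErrorDescription("String length must be greater than or equal to {{.min}}", details)
	// msg == "String length must be greater than or equal to 5"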
-func formatErrorDescription(s string, details ErrorDetails) string {
-
-	var tpl *template.Template
-	var descrAsBuffer bytes.Buffer
-	var err error
-
-	errorTemplates.RLock()
-	tpl = errorTemplates.Lookup(s)
-	errorTemplates.RUnlock()
-
-	if tpl == nil {
-		errorTemplates.Lock()
-		tpl = errorTemplates.New(s)
-
-		if ErrorTemplateFuncs != nil {
-			tpl.Funcs(ErrorTemplateFuncs)
-		}
-
-		tpl, err = tpl.Parse(s)
-		errorTemplates.Unlock()
-
-		if err != nil {
-			return err.Error()
-		}
-	}
-
-	err = tpl.Execute(&descrAsBuffer, details)
-	if err != nil {
-		return err.Error()
-	}
-
-	return descrAsBuffer.String()
-}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
deleted file mode 100644
index 94bd095ae..000000000
--- a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package gojsonschema
-
-import (
-	"net"
-	"net/url"
-	"reflect"
-	"regexp"
-	"strings"
-	"time"
-)
-
-type (
-	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
-	FormatChecker interface {
-		IsFormat(input string) bool
-	}
-
-	// FormatCheckerChain holds the formatters
-	FormatCheckerChain struct {
-		formatters map[string]FormatChecker
-	}
-
-	// EmailFormatChecker verifies email address formats
-	EmailFormatChecker struct{}
-
-	// IPV4FormatChecker verifies IP addresses in the ipv4 format
-	IPV4FormatChecker struct{}
-
-	// IPV6FormatChecker verifies IP addresses in the ipv6 format
-	IPV6FormatChecker struct{}
-
-	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
-	//
-	// Valid formats:
-	//	Partial Time: HH:MM:SS
-	//	Full Date: YYYY-MM-DD
-	//	Full Time: HH:MM:SSZ-07:00
-	//	Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
-	//
-	// Where
-	//	YYYY = 4DIGIT year
-	//	MM = 2DIGIT month ; 01-12
-	//	DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
-	//	HH = 2DIGIT hour ; 00-23
-	//	MM = 2DIGIT ; 00-59
-	//	SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
-	//	T = Literal
-	//	Z = Literal
-	//
-	// Note: Nanoseconds are also supported in all formats
-	//
-	// http://tools.ietf.org/html/rfc3339#section-5.6
-	DateTimeFormatChecker struct{}
-
-	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
-	URIFormatChecker struct{}
-
-	// URIReferenceFormatChecker validates a URI or relative-reference per RFC3986
-	URIReferenceFormatChecker struct{}
-
-	// HostnameFormatChecker validates a hostname is in the correct format
-	HostnameFormatChecker struct{}
-
-	// UUIDFormatChecker validates a UUID is in the correct format
-	UUIDFormatChecker struct{}
-
-	// RegexFormatChecker validates a regex is in the correct format
-	RegexFormatChecker struct{}
-)
-
-var (
-	// FormatCheckers holds the valid format checkers, and is a public variable
-	// so library users can add custom format checkers
-	FormatCheckers = FormatCheckerChain{
-		formatters: map[string]FormatChecker{
-			"date-time":     DateTimeFormatChecker{},
-			"hostname":      HostnameFormatChecker{},
-			"email":         EmailFormatChecker{},
-			"ipv4":          IPV4FormatChecker{},
-			"ipv6":          IPV6FormatChecker{},
-			"uri":           URIFormatChecker{},
-			"uri-reference": URIReferenceFormatChecker{},
-			"uuid":          UUIDFormatChecker{},
-			"regex":         RegexFormatChecker{},
-		},
-	}
-
-	// Regex credit: https://github.com/asaskevich/govalidator
-	rxEmail = 
regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") - - // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname - rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) - - rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") -) - -// Add adds a FormatChecker to the FormatCheckerChain -// The name used will be the value used for the format key in your json schema -func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { - c.formatters[name] = f - - return c -} - -// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) -func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { - delete(c.formatters, name) - - return c -} - -// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name -func (c *FormatCheckerChain) Has(name string) bool { - _, ok := c.formatters[name] - - return ok -} - -// IsFormat will check an input against a FormatChecker with the given name -// to see if it is the correct format -func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { - f, ok := c.formatters[name] - - if !ok { - return false - } - - if !isKind(input, reflect.String) { - return false - } - - inputString := input.(string) - - return f.IsFormat(inputString) -} - -func (f EmailFormatChecker) IsFormat(input string) bool { - return rxEmail.MatchString(input) -} - -// Credit: https://github.com/asaskevich/govalidator -func (f IPV4FormatChecker) IsFormat(input string) bool { - ip := net.ParseIP(input) - return ip != nil && strings.Contains(input, ".") -} - -// Credit: https://github.com/asaskevich/govalidator -func (f IPV6FormatChecker) IsFormat(input string) bool { - ip := net.ParseIP(input) - return ip != nil && strings.Contains(input, ":") -} - -func (f DateTimeFormatChecker) IsFormat(input string) bool { - formats := []string{ - "15:04:05", - "15:04:05Z07:00", - "2006-01-02", - time.RFC3339, - time.RFC3339Nano, - } - - for _, format := range formats { - if _, err := time.Parse(format, input); err == nil { - return true - } - } - - return false -} - -func (f URIFormatChecker) IsFormat(input string) bool { - u, err := url.Parse(input) - if err != nil || u.Scheme == "" { - return 
false - } - - return true -} - -func (f URIReferenceFormatChecker) IsFormat(input string) bool { - _, err := url.Parse(input) - return err == nil -} - -func (f HostnameFormatChecker) IsFormat(input string) bool { - return rxHostname.MatchString(input) && len(input) < 256 -} - -func (f UUIDFormatChecker) IsFormat(input string) bool { - return rxUUID.MatchString(input) -} - -// IsFormat implements FormatChecker interface. -func (f RegexFormatChecker) IsFormat(input string) bool { - if input == "" { - return true - } - _, err := regexp.Compile(input) - if err != nil { - return false - } - return true -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go deleted file mode 100644 index 4ef7a8d03..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Very simple log wrapper. -// Used for debugging/testing purposes. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "log" -) - -const internalLogEnabled = false - -func internalLog(format string, v ...interface{}) { - log.Printf(format, v...) -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go deleted file mode 100644 index fcc8d9d6f..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2013 MongoDB, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author tolsen -// author-github https://github.com/tolsen -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
-// -// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context -// -// created 04-09-2013 - -package gojsonschema - -import "bytes" - -// jsonContext implements a persistent linked-list of strings -type jsonContext struct { - head string - tail *jsonContext -} - -func newJsonContext(head string, tail *jsonContext) *jsonContext { - return &jsonContext{head, tail} -} - -// String displays the context in reverse. -// This plays well with the data structure's persistent nature with -// Cons and a json document's tree structure. -func (c *jsonContext) String(del ...string) string { - byteArr := make([]byte, 0, c.stringLen()) - buf := bytes.NewBuffer(byteArr) - c.writeStringToBuffer(buf, del) - - return buf.String() -} - -func (c *jsonContext) stringLen() int { - length := 0 - if c.tail != nil { - length = c.tail.stringLen() + 1 // add 1 for "." - } - - length += len(c.head) - return length -} - -func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { - if c.tail != nil { - c.tail.writeStringToBuffer(buf, del) - - if len(del) > 0 { - buf.WriteString(del[0]) - } else { - buf.WriteString(".") - } - } - - buf.WriteString(c.head) -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go deleted file mode 100644 index a77a81e40..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Different strategies to load JSON files. -// Includes References (file and HTTP), JSON strings and Go types. -// -// created 01-02-2015 - -package gojsonschema - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - - - "github.com/xeipuuv/gojsonreference" -) - -var osFS = osFileSystem(os.Open) - -// JSON loader interface - -type JSONLoader interface { - JsonSource() interface{} - LoadJSON() (interface{}, error) - JsonReference() (gojsonreference.JsonReference, error) - LoaderFactory() JSONLoaderFactory -} - -type JSONLoaderFactory interface { - New(source string) JSONLoader -} - -type DefaultJSONLoaderFactory struct { -} - -type FileSystemJSONLoaderFactory struct { - fs http.FileSystem -} - -func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { - return &jsonReferenceLoader{ - fs: f.fs, - source: source, - } -} - -// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. 
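That adapter is a one-liner in practice: any func(string) (*os.File, error) can be converted to the osFileSystem type declared just below and used wherever an http.FileSystem is expected, which is how the reference loader treats local files and HTTP URLs uniformly. Illustrative sketch (the path is made up; assumes net/http and os imported, as they are in this file):

	var fs http.FileSystem = osFileSystem(os.Open)
	f, err := fs.Open("/tmp/schema.json") // delegates straight to os.Open
	if err == nil {
		defer f.Close()
	}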
-type osFileSystem func(string) (*os.File, error) - -func (o osFileSystem) Open(name string) (http.File, error) { - return o(name) -} - -// JSON Reference loader -// references are used to load JSONs from files and HTTP - -type jsonReferenceLoader struct { - fs http.FileSystem - source string -} - -func (l *jsonReferenceLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference(l.JsonSource().(string)) -} - -func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { - return &FileSystemJSONLoaderFactory{ - fs: l.fs, - } -} - -// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. -func NewReferenceLoader(source string) *jsonReferenceLoader { - return &jsonReferenceLoader{ - fs: osFS, - source: source, - } -} - -// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. -func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { - return &jsonReferenceLoader{ - fs: fs, - source: source, - } -} - -func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { - - var err error - - reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) - if err != nil { - return nil, err - } - - refToUrl := reference - refToUrl.GetUrl().Fragment = "" - - var document interface{} - - if reference.HasFileScheme { - - filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1) - if runtime.GOOS == "windows" { - // on Windows, a file URL may have an extra leading slash, use slashes - // instead of backslashes, and have spaces escaped - if strings.HasPrefix(filename, "/") { - filename = filename[1:] - } - filename = filepath.FromSlash(filename) - } - - document, err = l.loadFromFile(filename) - if err != nil { - return nil, err - } - - } else { - - document, err = l.loadFromHTTP(refToUrl.String()) - if err != nil { - return nil, err - } - - } - - return document, nil - -} - -func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { - - resp, err := http.Get(address) - if err != nil { - return nil, err - } - - // must return HTTP Status 200 OK - if resp.StatusCode != http.StatusOK { - return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) - } - - bodyBuff, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) - -} - -func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { - f, err := l.fs.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - bodyBuff, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) - -} - -// JSON string loader - -type jsonStringLoader struct { - source string -} - -func (l *jsonStringLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewStringLoader(source string) *jsonStringLoader { - return &jsonStringLoader{source: source} -} - -func (l *jsonStringLoader) LoadJSON() (interface{}, error) { - - return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string))) - -} - -// JSON 
bytes loader - -type jsonBytesLoader struct { - source []byte -} - -func (l *jsonBytesLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewBytesLoader(source []byte) *jsonBytesLoader { - return &jsonBytesLoader{source: source} -} - -func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { - return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) -} - -// JSON Go (types) loader -// used to load JSONs from the code as maps, interface{}, structs ... - -type jsonGoLoader struct { - source interface{} -} - -func (l *jsonGoLoader) JsonSource() interface{} { - return l.source -} - -func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func NewGoLoader(source interface{}) *jsonGoLoader { - return &jsonGoLoader{source: source} -} - -func (l *jsonGoLoader) LoadJSON() (interface{}, error) { - - // convert it to a compliant JSON first to avoid types "mismatches" - - jsonBytes, err := json.Marshal(l.JsonSource()) - if err != nil { - return nil, err - } - - return decodeJsonUsingNumber(bytes.NewReader(jsonBytes)) - -} - -type jsonIOLoader struct { - buf *bytes.Buffer -} - -func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) -} - -func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) { - buf := &bytes.Buffer{} - return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) -} - -func (l *jsonIOLoader) JsonSource() interface{} { - return l.buf.String() -} - -func (l *jsonIOLoader) LoadJSON() (interface{}, error) { - return decodeJsonUsingNumber(l.buf) -} - -func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { - return gojsonreference.NewJsonReference("#") -} - -func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { - return &DefaultJSONLoaderFactory{} -} - -func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { - - var document interface{} - - decoder := json.NewDecoder(r) - decoder.UseNumber() - - err := decoder.Decode(&document) - if err != nil { - return nil, err - } - - return document, nil - -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE deleted file mode 100644 index c28adbadd..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2012 Julian Berman - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go deleted file mode 100644 index ee41484a7..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/locales.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Contains const string and messages. -// -// created 01-01-2015 - -package gojsonschema - -type ( - // locale is an interface for defining custom error strings - locale interface { - Required() string - InvalidType() string - NumberAnyOf() string - NumberOneOf() string - NumberAllOf() string - NumberNot() string - MissingDependency() string - Internal() string - Enum() string - ArrayNotEnoughItems() string - ArrayNoAdditionalItems() string - ArrayMinItems() string - ArrayMaxItems() string - Unique() string - ArrayMinProperties() string - ArrayMaxProperties() string - AdditionalPropertyNotAllowed() string - InvalidPropertyPattern() string - StringGTE() string - StringLTE() string - DoesNotMatchPattern() string - DoesNotMatchFormat() string - MultipleOf() string - NumberGTE() string - NumberGT() string - NumberLTE() string - NumberLT() string - - // Schema validations - RegexPattern() string - GreaterThanZero() string - MustBeOfA() string - MustBeOfAn() string - CannotBeUsedWithout() string - CannotBeGT() string - MustBeOfType() string - MustBeValidRegex() string - MustBeValidFormat() string - MustBeGTEZero() string - KeyCannotBeGreaterThan() string - KeyItemsMustBeOfType() string - KeyItemsMustBeUnique() string - ReferenceMustBeCanonical() string - NotAValidType() string - Duplicated() string - HttpBadStatus() string - ParseError() string - - // ErrorFormat - ErrorFormat() string - } - - // DefaultLocale is the default locale for this package - DefaultLocale struct{} -) - -func (l DefaultLocale) Required() string { - return `{{.property}} is required` -} - -func (l DefaultLocale) InvalidType() string { - return `Invalid type. 
Expected: {{.expected}}, given: {{.given}}` -} - -func (l DefaultLocale) NumberAnyOf() string { - return `Must validate at least one schema (anyOf)` -} - -func (l DefaultLocale) NumberOneOf() string { - return `Must validate one and only one schema (oneOf)` -} - -func (l DefaultLocale) NumberAllOf() string { - return `Must validate all the schemas (allOf)` -} - -func (l DefaultLocale) NumberNot() string { - return `Must not validate the schema (not)` -} - -func (l DefaultLocale) MissingDependency() string { - return `Has a dependency on {{.dependency}}` -} - -func (l DefaultLocale) Internal() string { - return `Internal Error {{.error}}` -} - -func (l DefaultLocale) Enum() string { - return `{{.field}} must be one of the following: {{.allowed}}` -} - -func (l DefaultLocale) ArrayNoAdditionalItems() string { - return `No additional items allowed on array` -} - -func (l DefaultLocale) ArrayNotEnoughItems() string { - return `Not enough items on array to match positional list of schema` -} - -func (l DefaultLocale) ArrayMinItems() string { - return `Array must have at least {{.min}} items` -} - -func (l DefaultLocale) ArrayMaxItems() string { - return `Array must have at most {{.max}} items` -} - -func (l DefaultLocale) Unique() string { - return `{{.type}} items must be unique` -} - -func (l DefaultLocale) ArrayMinProperties() string { - return `Must have at least {{.min}} properties` -} - -func (l DefaultLocale) ArrayMaxProperties() string { - return `Must have at most {{.max}} properties` -} - -func (l DefaultLocale) AdditionalPropertyNotAllowed() string { - return `Additional property {{.property}} is not allowed` -} - -func (l DefaultLocale) InvalidPropertyPattern() string { - return `Property "{{.property}}" does not match pattern {{.pattern}}` -} - -func (l DefaultLocale) StringGTE() string { - return `String length must be greater than or equal to {{.min}}` -} - -func (l DefaultLocale) StringLTE() string { - return `String length must be less than or equal to {{.max}}` -} - -func (l DefaultLocale) DoesNotMatchPattern() string { - return `Does not match pattern '{{.pattern}}'` -} - -func (l DefaultLocale) DoesNotMatchFormat() string { - return `Does not match format '{{.format}}'` -} - -func (l DefaultLocale) MultipleOf() string { - return `Must be a multiple of {{.multiple}}` -} - -func (l DefaultLocale) NumberGTE() string { - return `Must be greater than or equal to {{.min}}` -} - -func (l DefaultLocale) NumberGT() string { - return `Must be greater than {{.min}}` -} - -func (l DefaultLocale) NumberLTE() string { - return `Must be less than or equal to {{.max}}` -} - -func (l DefaultLocale) NumberLT() string { - return `Must be less than {{.max}}` -} - -// Schema validators -func (l DefaultLocale) RegexPattern() string { - return `Invalid regex pattern '{{.pattern}}'` -} - -func (l DefaultLocale) GreaterThanZero() string { - return `{{.number}} must be strictly greater than 0` -} - -func (l DefaultLocale) MustBeOfA() string { - return `{{.x}} must be of a {{.y}}` -} - -func (l DefaultLocale) MustBeOfAn() string { - return `{{.x}} must be of an {{.y}}` -} - -func (l DefaultLocale) CannotBeUsedWithout() string { - return `{{.x}} cannot be used without {{.y}}` -} - -func (l DefaultLocale) CannotBeGT() string { - return `{{.x}} cannot be greater than {{.y}}` -} - -func (l DefaultLocale) MustBeOfType() string { - return `{{.key}} must be of type {{.type}}` -} - -func (l DefaultLocale) MustBeValidRegex() string { - return `{{.key}} must be a valid regex` -} - -func (l DefaultLocale) 
MustBeValidFormat() string { - return `{{.key}} must be a valid format {{.given}}` -} - -func (l DefaultLocale) MustBeGTEZero() string { - return `{{.key}} must be greater than or equal to 0` -} - -func (l DefaultLocale) KeyCannotBeGreaterThan() string { - return `{{.key}} cannot be greater than {{.y}}` -} - -func (l DefaultLocale) KeyItemsMustBeOfType() string { - return `{{.key}} items must be {{.type}}` -} - -func (l DefaultLocale) KeyItemsMustBeUnique() string { - return `{{.key}} items must be unique` -} - -func (l DefaultLocale) ReferenceMustBeCanonical() string { - return `Reference {{.reference}} must be canonical` -} - -func (l DefaultLocale) NotAValidType() string { - return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` -} - -func (l DefaultLocale) Duplicated() string { - return `{{.type}} type is duplicated` -} - -func (l DefaultLocale) HttpBadStatus() string { - return `Could not read schema from HTTP, response status is {{.status}}` -} - -// Replacement options: field, description, context, value -func (l DefaultLocale) ErrorFormat() string { - return `{{.field}}: {{.description}}` -} - -//Parse error -func (l DefaultLocale) ParseError() string { - return `Expected: %expected%, given: Invalid JSON` -} - -const ( - STRING_NUMBER = "number" - STRING_ARRAY_OF_STRINGS = "array of strings" - STRING_ARRAY_OF_SCHEMAS = "array of schemas" - STRING_SCHEMA = "schema" - STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" - STRING_PROPERTIES = "properties" - STRING_DEPENDENCY = "dependency" - STRING_PROPERTY = "property" - STRING_UNDEFINED = "undefined" - STRING_CONTEXT_ROOT = "(root)" - STRING_ROOT_SCHEMA_PROPERTY = "(root)" -) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go deleted file mode 100644 index 6ad56ae86..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/result.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Result and ResultError implementations. -// -// created 01-01-2015 - -package gojsonschema - -import ( - "fmt" - "strings" -) - -type ( - // ErrorDetails is a map of details specific to each error. 
- // While the values will vary, every error will contain a "field" value - ErrorDetails map[string]interface{} - - // ResultError is the interface that library errors must implement - ResultError interface { - Field() string - SetType(string) - Type() string - SetContext(*jsonContext) - Context() *jsonContext - SetDescription(string) - Description() string - SetValue(interface{}) - Value() interface{} - SetDetails(ErrorDetails) - Details() ErrorDetails - String() string - } - - // ResultErrorFields holds the fields for each ResultError implementation. - // ResultErrorFields implements the ResultError interface, so custom errors - // can be defined by just embedding this type - ResultErrorFields struct { - errorType string // A string with the type of error (i.e. invalid_type) - context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... - description string // A human readable error message - value interface{} // Value given by the JSON file that is the source of the error - details ErrorDetails - } - - Result struct { - errors []ResultError - // Scores how well the validation matched. Useful in generating - // better error messages for anyOf and oneOf. - score int - } -) - -// Field outputs the field name without the root context -// i.e. firstName or person.firstName instead of (root).firstName or (root).person.firstName -func (v *ResultErrorFields) Field() string { - if p, ok := v.Details()["property"]; ok { - if str, isString := p.(string); isString { - return str - } - } - - return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") -} - -func (v *ResultErrorFields) SetType(errorType string) { - v.errorType = errorType -} - -func (v *ResultErrorFields) Type() string { - return v.errorType -} - -func (v *ResultErrorFields) SetContext(context *jsonContext) { - v.context = context -} - -func (v *ResultErrorFields) Context() *jsonContext { - return v.context -} - -func (v *ResultErrorFields) SetDescription(description string) { - v.description = description -} - -func (v *ResultErrorFields) Description() string { - return v.description -} - -func (v *ResultErrorFields) SetValue(value interface{}) { - v.value = value -} - -func (v *ResultErrorFields) Value() interface{} { - return v.value -} - -func (v *ResultErrorFields) SetDetails(details ErrorDetails) { - v.details = details -} - -func (v *ResultErrorFields) Details() ErrorDetails { - return v.details -} - -func (v ResultErrorFields) String() string { - // as a fallback, the value is displayed go style - valueString := fmt.Sprintf("%v", v.value) - - // marshal the go value value to json - if v.value == nil { - valueString = TYPE_NULL - } else { - if vs, err := marshalToJsonString(v.value); err == nil { - if vs == nil { - valueString = TYPE_NULL - } else { - valueString = *vs - } - } - } - - return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ - "context": v.context.String(), - "description": v.description, - "value": valueString, - "field": v.Field(), - }) -} - -func (v *Result) Valid() bool { - return len(v.errors) == 0 -} - -func (v *Result) Errors() []ResultError { - return v.errors -} - -func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) { - newError(err, context, value, Locale, details) - v.errors = append(v.errors, err) - v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function -} - -// Used to copy errors from a sub-schema to the main one -func (v *Result) 
mergeErrors(otherResult *Result) { - v.errors = append(v.errors, otherResult.Errors()...) - v.score += otherResult.score -} - -func (v *Result) incrementScore() { - v.score++ -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go deleted file mode 100644 index cc6cdbc0e..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schema.go +++ /dev/null @@ -1,933 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines Schema, the main entry to every subSchema. -// Contains the parsing logic and error checking. -// -// created 26-02-2013 - -package gojsonschema - -import ( - // "encoding/json" - "errors" - "reflect" - "regexp" - "text/template" - - "github.com/xeipuuv/gojsonreference" -) - -var ( - // Locale is the default locale to use - // Library users can overwrite with their own implementation - Locale locale = DefaultLocale{} - - // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. - ErrorTemplateFuncs template.FuncMap -) - -func NewSchema(l JSONLoader) (*Schema, error) { - ref, err := l.JsonReference() - if err != nil { - return nil, err - } - - d := Schema{} - d.pool = newSchemaPool(l.LoaderFactory()) - d.documentReference = ref - d.referencePool = newSchemaReferencePool() - - var doc interface{} - if ref.String() != "" { - // Get document from schema pool - spd, err := d.pool.GetDocument(d.documentReference) - if err != nil { - return nil, err - } - doc = spd.Document - } else { - // Load JSON directly - doc, err = l.LoadJSON() - if err != nil { - return nil, err - } - d.pool.SetStandaloneDocument(doc) - } - - err = d.parse(doc) - if err != nil { - return nil, err - } - - return &d, nil -} - -type Schema struct { - documentReference gojsonreference.JsonReference - rootSchema *subSchema - pool *schemaPool - referencePool *schemaReferencePool -} - -func (d *Schema) parse(document interface{}) error { - d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY} - return d.parseSchema(document, d.rootSchema) -} - -func (d *Schema) SetRootSchemaName(name string) { - d.rootSchema.property = name -} - -// Parses a subSchema -// -// Pretty long function ( sorry :) )... 
but pretty straight forward, repetitive and boring -// Not much magic involved here, most of the job is to validate the key names and their values, -// then the values are copied into subSchema struct -// -func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.ParseError(), - ErrorDetails{ - "expected": STRING_SCHEMA, - }, - )) - } - - m := documentNode.(map[string]interface{}) - - if currentSchema == d.rootSchema { - currentSchema.ref = &d.documentReference - } - - // $subSchema - if existsMapKey(m, KEY_SCHEMA) { - if !isKind(m[KEY_SCHEMA], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_SCHEMA, - }, - )) - } - schemaRef := m[KEY_SCHEMA].(string) - schemaReference, err := gojsonreference.NewJsonReference(schemaRef) - currentSchema.subSchema = &schemaReference - if err != nil { - return err - } - } - - // $ref - if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_REF, - }, - )) - } - if k, ok := m[KEY_REF].(string); ok { - - jsonReference, err := gojsonreference.NewJsonReference(k) - if err != nil { - return err - } - - if jsonReference.HasFullUrl { - currentSchema.ref = &jsonReference - } else { - inheritedReference, err := currentSchema.ref.Inherits(jsonReference) - if err != nil { - return err - } - - currentSchema.ref = inheritedReference - } - - if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok { - currentSchema.refSchema = sch - - } else { - err := d.parseReference(documentNode, currentSchema, k) - if err != nil { - return err - } - - return nil - } - } - - // definitions - if existsMapKey(m, KEY_DEFINITIONS) { - if isKind(m[KEY_DEFINITIONS], reflect.Map) { - currentSchema.definitions = make(map[string]*subSchema) - for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { - if isKind(dv, reflect.Map) { - newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.definitions[dk] = newSchema - err := d.parseSchema(dv, newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_ARRAY_OF_SCHEMAS, - "given": KEY_DEFINITIONS, - }, - )) - } - - } - - // id - if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_ID, - }, - )) - } - if k, ok := m[KEY_ID].(string); ok { - currentSchema.id = &k - } - - // title - if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - "given": KEY_TITLE, - }, - )) - } - if k, ok := m[KEY_TITLE].(string); ok { - currentSchema.title = &k - } - - // description - if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING, - 
"given": KEY_DESCRIPTION, - }, - )) - } - if k, ok := m[KEY_DESCRIPTION].(string); ok { - currentSchema.description = &k - } - - // type - if existsMapKey(m, KEY_TYPE) { - if isKind(m[KEY_TYPE], reflect.String) { - if k, ok := m[KEY_TYPE].(string); ok { - err := currentSchema.types.Add(k) - if err != nil { - return err - } - } - } else { - if isKind(m[KEY_TYPE], reflect.Slice) { - arrayOfTypes := m[KEY_TYPE].([]interface{}) - for _, typeInArray := range arrayOfTypes { - if reflect.ValueOf(typeInArray).Kind() != reflect.String { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } else { - currentSchema.types.Add(typeInArray.(string)) - } - } - - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, - "given": KEY_TYPE, - }, - )) - } - } - } - - // properties - if existsMapKey(m, KEY_PROPERTIES) { - err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) - if err != nil { - return err - } - } - - // additionalProperties - if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { - if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { - currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) - } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalProperties = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_PROPERTIES, - }, - )) - } - } - - // patternProperties - if existsMapKey(m, KEY_PATTERN_PROPERTIES) { - if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { - patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) - if len(patternPropertiesMap) > 0 { - currentSchema.patternProperties = make(map[string]*subSchema) - for k, v := range patternPropertiesMap { - _, err := regexp.MatchString(k, "") - if err != nil { - return errors.New(formatErrorDescription( - Locale.RegexPattern(), - ErrorDetails{"pattern": k}, - )) - } - newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} - err = d.parseSchema(v, newSchema) - if err != nil { - return errors.New(err.Error()) - } - currentSchema.patternProperties[k] = newSchema - } - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA, - "given": KEY_PATTERN_PROPERTIES, - }, - )) - } - } - - // dependencies - if existsMapKey(m, KEY_DEPENDENCIES) { - err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) - if err != nil { - return err - } - } - - // items - if existsMapKey(m, KEY_ITEMS) { - if isKind(m[KEY_ITEMS], reflect.Slice) { - for _, itemElement := range m[KEY_ITEMS].([]interface{}) { - if isKind(itemElement, reflect.Map) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.AddItemsChild(newSchema) - err := d.parseSchema(itemElement, newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - 
"given": KEY_ITEMS, - }, - )) - } - currentSchema.itemsChildrenIsSingleSchema = false - } - } else if isKind(m[KEY_ITEMS], reflect.Map) { - newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} - newSchema.ref = currentSchema.ref - currentSchema.AddItemsChild(newSchema) - err := d.parseSchema(m[KEY_ITEMS], newSchema) - if err != nil { - return err - } - currentSchema.itemsChildrenIsSingleSchema = true - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, - "given": KEY_ITEMS, - }, - )) - } - } - - // additionalItems - if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { - if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { - currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) - } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { - newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} - currentSchema.additionalItems = newSchema - err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) - if err != nil { - return errors.New(err.Error()) - } - } else { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, - "given": KEY_ADDITIONAL_ITEMS, - }, - )) - } - } - - // validation : number / integer - - if existsMapKey(m, KEY_MULTIPLE_OF) { - multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) - if multipleOfValue == nil { - return errors.New(formatErrorDescription( - Locale.InvalidType(), - ErrorDetails{ - "expected": STRING_NUMBER, - "given": KEY_MULTIPLE_OF, - }, - )) - } - if *multipleOfValue <= 0 { - return errors.New(formatErrorDescription( - Locale.GreaterThanZero(), - ErrorDetails{"number": KEY_MULTIPLE_OF}, - )) - } - currentSchema.multipleOf = multipleOfValue - } - - if existsMapKey(m, KEY_MINIMUM) { - minimumValue := mustBeNumber(m[KEY_MINIMUM]) - if minimumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.minimum = minimumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { - if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { - if currentSchema.minimum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, - )) - } - exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool) - currentSchema.exclusiveMinimum = exclusiveMinimumValue - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN}, - )) - } - } - - if existsMapKey(m, KEY_MAXIMUM) { - maximumValue := mustBeNumber(m[KEY_MAXIMUM]) - if maximumValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, - )) - } - currentSchema.maximum = maximumValue - } - - if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { - if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { - if currentSchema.maximum == nil { - return errors.New(formatErrorDescription( - Locale.CannotBeUsedWithout(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, - )) - } - exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool) - currentSchema.exclusiveMaximum = exclusiveMaximumValue - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER}, - )) - } - } - - if currentSchema.minimum != nil && 
currentSchema.maximum != nil { - if *currentSchema.minimum > *currentSchema.maximum { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, - )) - } - } - - // validation : string - - if existsMapKey(m, KEY_MIN_LENGTH) { - minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) - if minLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *minLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_LENGTH}, - )) - } - currentSchema.minLength = minLengthIntegerValue - } - - if existsMapKey(m, KEY_MAX_LENGTH) { - maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) - if maxLengthIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, - )) - } - if *maxLengthIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_LENGTH}, - )) - } - currentSchema.maxLength = maxLengthIntegerValue - } - - if currentSchema.minLength != nil && currentSchema.maxLength != nil { - if *currentSchema.minLength > *currentSchema.maxLength { - return errors.New(formatErrorDescription( - Locale.CannotBeGT(), - ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, - )) - } - } - - if existsMapKey(m, KEY_PATTERN) { - if isKind(m[KEY_PATTERN], reflect.String) { - regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) - if err != nil { - return errors.New(formatErrorDescription( - Locale.MustBeValidRegex(), - ErrorDetails{"key": KEY_PATTERN}, - )) - } - currentSchema.pattern = regexpObject - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, - )) - } - } - - if existsMapKey(m, KEY_FORMAT) { - formatString, ok := m[KEY_FORMAT].(string) - if ok && FormatCheckers.Has(formatString) { - currentSchema.format = formatString - } else { - return errors.New(formatErrorDescription( - Locale.MustBeValidFormat(), - ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]}, - )) - } - } - - // validation : object - - if existsMapKey(m, KEY_MIN_PROPERTIES) { - minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) - if minPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *minPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_PROPERTIES}, - )) - } - currentSchema.minProperties = minPropertiesIntegerValue - } - - if existsMapKey(m, KEY_MAX_PROPERTIES) { - maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) - if maxPropertiesIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, - )) - } - if *maxPropertiesIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_PROPERTIES}, - )) - } - currentSchema.maxProperties = maxPropertiesIntegerValue - } - - if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { - if *currentSchema.minProperties > *currentSchema.maxProperties { - return errors.New(formatErrorDescription( - Locale.KeyCannotBeGreaterThan(), - ErrorDetails{"key": 
KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, - )) - } - } - - if existsMapKey(m, KEY_REQUIRED) { - if isKind(m[KEY_REQUIRED], reflect.Slice) { - requiredValues := m[KEY_REQUIRED].([]interface{}) - for _, requiredValue := range requiredValues { - if isKind(requiredValue, reflect.String) { - err := currentSchema.AddRequired(requiredValue.(string)) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.KeyItemsMustBeOfType(), - ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, - )) - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, - )) - } - } - - // validation : array - - if existsMapKey(m, KEY_MIN_ITEMS) { - minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) - if minItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *minItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MIN_ITEMS}, - )) - } - currentSchema.minItems = minItemsIntegerValue - } - - if existsMapKey(m, KEY_MAX_ITEMS) { - maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) - if maxItemsIntegerValue == nil { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, - )) - } - if *maxItemsIntegerValue < 0 { - return errors.New(formatErrorDescription( - Locale.MustBeGTEZero(), - ErrorDetails{"key": KEY_MAX_ITEMS}, - )) - } - currentSchema.maxItems = maxItemsIntegerValue - } - - if existsMapKey(m, KEY_UNIQUE_ITEMS) { - if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { - currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfA(), - ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, - )) - } - } - - // validation : all - - if existsMapKey(m, KEY_ENUM) { - if isKind(m[KEY_ENUM], reflect.Slice) { - for _, v := range m[KEY_ENUM].([]interface{}) { - err := currentSchema.AddEnum(v) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, - )) - } - } - - // validation : subSchema - - if existsMapKey(m, KEY_ONE_OF) { - if isKind(m[KEY_ONE_OF], reflect.Slice) { - for _, v := range m[KEY_ONE_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddOneOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ANY_OF) { - if isKind(m[KEY_ANY_OF], reflect.Slice) { - for _, v := range m[KEY_ANY_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddAnyOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_ALL_OF) { - if isKind(m[KEY_ALL_OF], reflect.Slice) { - for _, v := range m[KEY_ALL_OF].([]interface{}) { - newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} - 
currentSchema.AddAllOf(newSchema) - err := d.parseSchema(v, newSchema) - if err != nil { - return err - } - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, - )) - } - } - - if existsMapKey(m, KEY_NOT) { - if isKind(m[KEY_NOT], reflect.Map) { - newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} - currentSchema.SetNot(newSchema) - err := d.parseSchema(m[KEY_NOT], newSchema) - if err != nil { - return err - } - } else { - return errors.New(formatErrorDescription( - Locale.MustBeOfAn(), - ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, - )) - } - } - - return nil -} - -func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) error { - var refdDocumentNode interface{} - jsonPointer := currentSchema.ref.GetPointer() - standaloneDocument := d.pool.GetStandaloneDocument() - - if standaloneDocument != nil { - - var err error - refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument) - if err != nil { - return err - } - - } else { - dsp, err := d.pool.GetDocument(*currentSchema.ref) - if err != nil { - return err - } - - refdDocumentNode, _, err = jsonPointer.Get(dsp.Document) - if err != nil { - return err - } - - } - - if !isKind(refdDocumentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, - )) - } - - // returns the loaded referenced subSchema for the caller to update its current subSchema - newSchemaDocument := refdDocumentNode.(map[string]interface{}) - newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} - d.referencePool.Add(currentSchema.ref.String()+reference, newSchema) - - err := d.parseSchema(newSchemaDocument, newSchema) - if err != nil { - return err - } - - currentSchema.refSchema = newSchema - - return nil - -} - -func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - for k := range m { - schemaProperty := k - newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} - currentSchema.AddPropertiesChild(newSchema) - err := d.parseSchema(m[k], newSchema) - if err != nil { - return err - } - } - - return nil -} - -func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { - - if !isKind(documentNode, reflect.Map) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, - )) - } - - m := documentNode.(map[string]interface{}) - currentSchema.dependencies = make(map[string]interface{}) - - for k := range m { - switch reflect.ValueOf(m[k]).Kind() { - - case reflect.Slice: - values := m[k].([]interface{}) - var valuesToRegister []string - - for _, value := range values { - if !isKind(value, reflect.String) { - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } else { - valuesToRegister = append(valuesToRegister, value.(string)) - } - currentSchema.dependencies[k] = valuesToRegister - } - - case reflect.Map: - depSchema := &subSchema{property: k, parent: 
currentSchema, ref: currentSchema.ref} - err := d.parseSchema(m[k], depSchema) - if err != nil { - return err - } - currentSchema.dependencies[k] = depSchema - - default: - return errors.New(formatErrorDescription( - Locale.MustBeOfType(), - ErrorDetails{ - "key": STRING_DEPENDENCY, - "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, - }, - )) - } - - } - - return nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go deleted file mode 100644 index f2ad641af..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines resources pooling. -// Eases referencing and avoids downloading the same resource twice. -// -// created 26-02-2013 - -package gojsonschema - -import ( - "errors" - - "github.com/xeipuuv/gojsonreference" -) - -type schemaPoolDocument struct { - Document interface{} -} - -type schemaPool struct { - schemaPoolDocuments map[string]*schemaPoolDocument - standaloneDocument interface{} - jsonLoaderFactory JSONLoaderFactory -} - -func newSchemaPool(f JSONLoaderFactory) *schemaPool { - - p := &schemaPool{} - p.schemaPoolDocuments = make(map[string]*schemaPoolDocument) - p.standaloneDocument = nil - p.jsonLoaderFactory = f - - return p -} - -func (p *schemaPool) SetStandaloneDocument(document interface{}) { - p.standaloneDocument = document -} - -func (p *schemaPool) GetStandaloneDocument() (document interface{}) { - return p.standaloneDocument -} - -func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { - - if internalLogEnabled { - internalLog("Get Document ( %s )", reference.String()) - } - - var err error - - // It is not possible to load anything that is not canonical... 
- if !reference.IsCanonical() { - return nil, errors.New(formatErrorDescription( - Locale.ReferenceMustBeCanonical(), - ErrorDetails{"reference": reference}, - )) - } - - refToUrl := reference - refToUrl.GetUrl().Fragment = "" - - var spd *schemaPoolDocument - - // Try to find the requested document in the pool - for k := range p.schemaPoolDocuments { - if k == refToUrl.String() { - spd = p.schemaPoolDocuments[k] - } - } - - if spd != nil { - if internalLogEnabled { - internalLog(" From pool") - } - return spd, nil - } - - jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) - document, err := jsonReferenceLoader.LoadJSON() - if err != nil { - return nil, err - } - - spd = &schemaPoolDocument{Document: document} - // add the document to the pool for potential later use - p.schemaPoolDocuments[refToUrl.String()] = spd - - return spd, nil -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go deleted file mode 100644 index 294e36a73..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Pool of referenced schemas. -// -// created 25-06-2013 - -package gojsonschema - -import ( - "fmt" -) - -type schemaReferencePool struct { - documents map[string]*subSchema -} - -func newSchemaReferencePool() *schemaReferencePool { - - p := &schemaReferencePool{} - p.documents = make(map[string]*subSchema) - - return p -} - -func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) - } - - if sch, ok := p.documents[ref]; ok { - if internalLogEnabled { - internalLog(fmt.Sprintf(" From pool")) - } - return sch, true - } - - return nil, false -} - -func (p *schemaReferencePool) Add(ref string, sch *subSchema) { - - if internalLogEnabled { - internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) - } - - p.documents[ref] = sch -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go deleted file mode 100644 index 36b447a29..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Helper structure to handle schema types, and the combination of them. -// -// created 28-02-2013 - -package gojsonschema - -import ( - "errors" - "fmt" - "strings" -) - -type jsonSchemaType struct { - types []string -} - -// Is the schema typed ? that is containing at least one type -// When not typed, the schema does not need any type validation -func (t *jsonSchemaType) IsTyped() bool { - return len(t.types) > 0 -} - -func (t *jsonSchemaType) Add(etype string) error { - - if !isStringInSlice(JSON_TYPES, etype) { - return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) - } - - if t.Contains(etype) { - return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) - } - - t.types = append(t.types, etype) - - return nil -} - -func (t *jsonSchemaType) Contains(etype string) bool { - - for _, v := range t.types { - if v == etype { - return true - } - } - - return false -} - -func (t *jsonSchemaType) String() string { - - if len(t.types) == 0 { - return STRING_UNDEFINED // should never happen - } - - // Displayed as a list [type1,type2,...] - if len(t.types) > 1 { - return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) - } - - // Only one type: name only - return t.types[0] -} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go deleted file mode 100644 index 9ddbb5fc1..000000000 --- a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// author xeipuuv -// author-github https://github.com/xeipuuv -// author-mail xeipuuv@gmail.com -// -// repository-name gojsonschema -// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. -// -// description Defines the structure of a sub-subSchema. -// A sub-subSchema can contain other sub-schemas. 
-//
-// created 27-02-2013
-
-package gojsonschema
-
-import (
-	"errors"
-	"regexp"
-	"strings"
-
-	"github.com/xeipuuv/gojsonreference"
-)
-
-const (
-	KEY_SCHEMA = "$subSchema"
-	KEY_ID = "$id"
-	KEY_REF = "$ref"
-	KEY_TITLE = "title"
-	KEY_DESCRIPTION = "description"
-	KEY_TYPE = "type"
-	KEY_ITEMS = "items"
-	KEY_ADDITIONAL_ITEMS = "additionalItems"
-	KEY_PROPERTIES = "properties"
-	KEY_PATTERN_PROPERTIES = "patternProperties"
-	KEY_ADDITIONAL_PROPERTIES = "additionalProperties"
-	KEY_DEFINITIONS = "definitions"
-	KEY_MULTIPLE_OF = "multipleOf"
-	KEY_MINIMUM = "minimum"
-	KEY_MAXIMUM = "maximum"
-	KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum"
-	KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum"
-	KEY_MIN_LENGTH = "minLength"
-	KEY_MAX_LENGTH = "maxLength"
-	KEY_PATTERN = "pattern"
-	KEY_FORMAT = "format"
-	KEY_MIN_PROPERTIES = "minProperties"
-	KEY_MAX_PROPERTIES = "maxProperties"
-	KEY_DEPENDENCIES = "dependencies"
-	KEY_REQUIRED = "required"
-	KEY_MIN_ITEMS = "minItems"
-	KEY_MAX_ITEMS = "maxItems"
-	KEY_UNIQUE_ITEMS = "uniqueItems"
-	KEY_ENUM = "enum"
-	KEY_ONE_OF = "oneOf"
-	KEY_ANY_OF = "anyOf"
-	KEY_ALL_OF = "allOf"
-	KEY_NOT = "not"
-)
-
-type subSchema struct {
-
-	// basic subSchema meta properties
-	id *string
-	title *string
-	description *string
-
-	property string
-
-	// Types associated with the subSchema
-	types jsonSchemaType
-
-	// Reference url
-	ref *gojsonreference.JsonReference
-	// Schema referenced
-	refSchema *subSchema
-	// Json reference
-	subSchema *gojsonreference.JsonReference
-
-	// hierarchy
-	parent *subSchema
-	definitions map[string]*subSchema
-	definitionsChildren []*subSchema
-	itemsChildren []*subSchema
-	itemsChildrenIsSingleSchema bool
-	propertiesChildren []*subSchema
-
-	// validation : number / integer
-	multipleOf *float64
-	maximum *float64
-	exclusiveMaximum bool
-	minimum *float64
-	exclusiveMinimum bool
-
-	// validation : string
-	minLength *int
-	maxLength *int
-	pattern *regexp.Regexp
-	format string
-
-	// validation : object
-	minProperties *int
-	maxProperties *int
-	required []string
-
-	dependencies map[string]interface{}
-	additionalProperties interface{}
-	patternProperties map[string]*subSchema
-
-	// validation : array
-	minItems *int
-	maxItems *int
-	uniqueItems bool
-
-	additionalItems interface{}
-
-	// validation : all
-	enum []string
-
-	// validation : subSchema
-	oneOf []*subSchema
-	anyOf []*subSchema
-	allOf []*subSchema
-	not *subSchema
-}
-
-func (s *subSchema) AddEnum(i interface{}) error {
-
-	is, err := marshalToJsonString(i)
-	if err != nil {
-		return err
-	}
-
-	if isStringInSlice(s.enum, *is) {
-		return errors.New(formatErrorDescription(
-			Locale.KeyItemsMustBeUnique(),
-			ErrorDetails{"key": KEY_ENUM},
-		))
-	}
-
-	s.enum = append(s.enum, *is)
-
-	return nil
-}
-
-func (s *subSchema) ContainsEnum(i interface{}) (bool, error) {
-
-	is, err := marshalToJsonString(i)
-	if err != nil {
-		return false, err
-	}
-
-	return isStringInSlice(s.enum, *is), nil
-}
-
-func (s *subSchema) AddOneOf(subSchema *subSchema) {
-	s.oneOf = append(s.oneOf, subSchema)
-}
-
-func (s *subSchema) AddAllOf(subSchema *subSchema) {
-	s.allOf = append(s.allOf, subSchema)
-}
-
-func (s *subSchema) AddAnyOf(subSchema *subSchema) {
-	s.anyOf = append(s.anyOf, subSchema)
-}
-
-func (s *subSchema) SetNot(subSchema *subSchema) {
-	s.not = subSchema
-}
-
-func (s *subSchema) AddRequired(value string) error {
-
-	if isStringInSlice(s.required, value) {
-		return errors.New(formatErrorDescription(
-			Locale.KeyItemsMustBeUnique(),
-			ErrorDetails{"key": KEY_REQUIRED},
-		))
-	}
-
-	s.required = append(s.required, value)
-
-	return nil
-}
-
-func (s *subSchema) AddDefinitionChild(child *subSchema) {
-	s.definitionsChildren = append(s.definitionsChildren, child)
-}
-
-func (s *subSchema) AddItemsChild(child *subSchema) {
-	s.itemsChildren = append(s.itemsChildren, child)
-}
-
-func (s *subSchema) AddPropertiesChild(child *subSchema) {
-	s.propertiesChildren = append(s.propertiesChildren, child)
-}
-
-func (s *subSchema) PatternPropertiesString() string {
-
-	if s.patternProperties == nil || len(s.patternProperties) == 0 {
-		return STRING_UNDEFINED // should never happen
-	}
-
-	patternPropertiesKeySlice := []string{}
-	for pk := range s.patternProperties {
-		patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`)
-	}
-
-	if len(patternPropertiesKeySlice) == 1 {
-		return patternPropertiesKeySlice[0]
-	}
-
-	return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]"
-
-}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go
deleted file mode 100644
index 952d22ef6..000000000
--- a/vendor/github.com/xeipuuv/gojsonschema/types.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// author xeipuuv
-// author-github https://github.com/xeipuuv
-// author-mail xeipuuv@gmail.com
-//
-// repository-name gojsonschema
-// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
-//
-// description Contains const types for schema and JSON.
-//
-// created 28-02-2013
-
-package gojsonschema
-
-const (
-	TYPE_ARRAY = `array`
-	TYPE_BOOLEAN = `boolean`
-	TYPE_INTEGER = `integer`
-	TYPE_NUMBER = `number`
-	TYPE_NULL = `null`
-	TYPE_OBJECT = `object`
-	TYPE_STRING = `string`
-)
-
-var JSON_TYPES []string
-var SCHEMA_TYPES []string
-
-func init() {
-	JSON_TYPES = []string{
-		TYPE_ARRAY,
-		TYPE_BOOLEAN,
-		TYPE_INTEGER,
-		TYPE_NUMBER,
-		TYPE_NULL,
-		TYPE_OBJECT,
-		TYPE_STRING}
-
-	SCHEMA_TYPES = []string{
-		TYPE_ARRAY,
-		TYPE_BOOLEAN,
-		TYPE_INTEGER,
-		TYPE_NUMBER,
-		TYPE_OBJECT,
-		TYPE_STRING}
-}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go
deleted file mode 100644
index 26cf75ebf..000000000
--- a/vendor/github.com/xeipuuv/gojsonschema/utils.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// author xeipuuv
-// author-github https://github.com/xeipuuv
-// author-mail xeipuuv@gmail.com
-//
-// repository-name gojsonschema
-// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
-//
-// description Various utility functions.
-//
-// created 26-02-2013
-
-package gojsonschema
-
-import (
-	"encoding/json"
-	"fmt"
-	"math"
-	"reflect"
-	"strconv"
-)
-
-func isKind(what interface{}, kind reflect.Kind) bool {
-	target := what
-	if isJsonNumber(what) {
-		// JSON Numbers are strings!
-		target = *mustBeNumber(what)
-	}
-	return reflect.ValueOf(target).Kind() == kind
-}
-
-func existsMapKey(m map[string]interface{}, k string) bool {
-	_, ok := m[k]
-	return ok
-}
-
-func isStringInSlice(s []string, what string) bool {
-	for i := range s {
-		if s[i] == what {
-			return true
-		}
-	}
-	return false
-}
-
-func marshalToJsonString(value interface{}) (*string, error) {
-
-	mBytes, err := json.Marshal(value)
-	if err != nil {
-		return nil, err
-	}
-
-	sBytes := string(mBytes)
-	return &sBytes, nil
-}
-
-func isJsonNumber(what interface{}) bool {
-
-	switch what.(type) {
-
-	case json.Number:
-		return true
-	}
-
-	return false
-}
-
-func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) {
-
-	jsonNumber := what.(json.Number)
-
-	f64, errFloat64 := jsonNumber.Float64()
-	s64 := strconv.FormatFloat(f64, 'f', -1, 64)
-	_, errInt64 := strconv.ParseInt(s64, 10, 64)
-
-	isValidFloat64 = errFloat64 == nil
-	isValidInt64 = errInt64 == nil
-
-	_, errInt32 := strconv.ParseInt(s64, 10, 32)
-	isValidInt32 = isValidInt64 && errInt32 == nil
-
-	return
-
-}
-
-// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER
-const (
-	max_json_float = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1
-	min_json_float = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1
-)
-
-func isFloat64AnInteger(f float64) bool {
-
-	if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float {
-		return false
-	}
-
-	return f == float64(int64(f)) || f == float64(uint64(f))
-}
-
-func mustBeInteger(what interface{}) *int {
-
-	if isJsonNumber(what) {
-
-		number := what.(json.Number)
-
-		_, _, isValidInt32 := checkJsonNumber(number)
-
-		if isValidInt32 {
-
-			int64Value, err := number.Int64()
-			if err != nil {
-				return nil
-			}
-
-			int32Value := int(int64Value)
-			return &int32Value
-
-		} else {
-			return nil
-		}
-
-	}
-
-	return nil
-}
-
-func mustBeNumber(what interface{}) *float64 {
-
-	if isJsonNumber(what) {
-
-		number := what.(json.Number)
-		float64Value, err := number.Float64()
-
-		if err == nil {
-			return &float64Value
-		} else {
-			return nil
-		}
-
-	}
-
-	return nil
-
-}
-
-// formats a number so that it is displayed as the smallest string possible
-func resultErrorFormatJsonNumber(n json.Number) string {
-
-	if int64Value, err := n.Int64(); err == nil {
-		return fmt.Sprintf("%d", int64Value)
-	}
-
-	float64Value, _ := n.Float64()
-
-	return fmt.Sprintf("%g", float64Value)
-}
-
-// formats a number so that it is displayed as the smallest string possible
-func resultErrorFormatNumber(n float64) string {
-
-	if isFloat64AnInteger(n) {
-		return fmt.Sprintf("%d", int64(n))
-	}
-
-	return fmt.Sprintf("%g", n)
-}
-
-func convertDocumentNode(val interface{}) interface{} {
-
-	if lval, ok := val.([]interface{}); ok {
-
-		res := []interface{}{}
-		for _, v := range lval {
-			res = append(res, convertDocumentNode(v))
-		}
-
-		return res
-
-	}
-
-	if mval, ok := val.(map[interface{}]interface{}); ok {
-
-		res := map[string]interface{}{}
-
-		for k, v := range mval {
-			res[k.(string)] = convertDocumentNode(v)
-		}
-
-		return res
-
-	}
-
-	return val
-}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go
deleted file mode 100644
index 6140bd8c2..000000000
--- a/vendor/github.com/xeipuuv/gojsonschema/validation.go
+++ /dev/null
@@ -1,832 +0,0 @@
-// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// author xeipuuv
-// author-github https://github.com/xeipuuv
-// author-mail xeipuuv@gmail.com
-//
-// repository-name gojsonschema
-// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
-//
-// description Extends Schema and subSchema, implements the validation phase.
-//
-// created 28-02-2013
-
-package gojsonschema
-
-import (
-	"encoding/json"
-	"reflect"
-	"regexp"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-)
-
-func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
-
-	var err error
-
-	// load schema
-
-	schema, err := NewSchema(ls)
-	if err != nil {
-		return nil, err
-	}
-
-	// begine validation
-
-	return schema.Validate(ld)
-
-}
-
-func (v *Schema) Validate(l JSONLoader) (*Result, error) {
-
-	// load document
-
-	root, err := l.LoadJSON()
-	if err != nil {
-		return nil, err
-	}
-
-	// begin validation
-
-	result := &Result{}
-	context := newJsonContext(STRING_CONTEXT_ROOT, nil)
-	v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
-
-	return result, nil
-
-}
-
-func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
-	result := &Result{}
-	v.validateRecursive(v, document, result, context)
-	return result
-}
-
-// Walker function to validate the json recursively against the subSchema
-func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
-
-	if internalLogEnabled {
-		internalLog("validateRecursive %s", context.String())
-		internalLog(" %v", currentNode)
-	}
-
-	// Handle referenced schemas, returns directly when a $ref is found
-	if currentSubSchema.refSchema != nil {
-		v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
-		return
-	}
-
-	// Check for null value
-	if currentNode == nil {
-		if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
-			result.addError(
-				new(InvalidTypeError),
-				context,
-				currentNode,
-				ErrorDetails{
-					"expected": currentSubSchema.types.String(),
-					"given": TYPE_NULL,
-				},
-			)
-			return
-		}
-
-		currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
-		v.validateCommon(currentSubSchema, currentNode, result, context)
-
-	} else { // Not a null value
-
-		if isJsonNumber(currentNode) {
-
-			value := currentNode.(json.Number)
-
-			_, isValidInt64, _ := checkJsonNumber(value)
-
-			validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
-
-			if currentSubSchema.types.IsTyped() && !validType {
-
-				givenType := TYPE_INTEGER
-				if !isValidInt64 {
-					givenType = TYPE_NUMBER
-				}
-
-				result.addError(
-					new(InvalidTypeError),
-					context,
-					currentNode,
-					ErrorDetails{
-						"expected": currentSubSchema.types.String(),
-						"given": givenType,
-					},
-				)
-				return
-			}
-
-			currentSubSchema.validateSchema(currentSubSchema, value, result, context)
-			v.validateNumber(currentSubSchema, value, result, context)
-			v.validateCommon(currentSubSchema, value, result, context)
-			v.validateString(currentSubSchema, value, result, context)
-
-		} else {
-
-			rValue := reflect.ValueOf(currentNode)
-			rKind := rValue.Kind()
-
-			switch rKind {
-
-			// Slice => JSON array
-
-			case reflect.Slice:
-
-				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
-					result.addError(
-						new(InvalidTypeError),
-						context,
-						currentNode,
-						ErrorDetails{
-							"expected": currentSubSchema.types.String(),
-							"given": TYPE_ARRAY,
-						},
-					)
-					return
-				}
-
-				castCurrentNode := currentNode.([]interface{})
-
-				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
-
-				v.validateArray(currentSubSchema, castCurrentNode, result, context)
-				v.validateCommon(currentSubSchema, castCurrentNode, result, context)
-
-			// Map => JSON object
-
-			case reflect.Map:
-				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
-					result.addError(
-						new(InvalidTypeError),
-						context,
-						currentNode,
-						ErrorDetails{
-							"expected": currentSubSchema.types.String(),
-							"given": TYPE_OBJECT,
-						},
-					)
-					return
-				}
-
-				castCurrentNode, ok := currentNode.(map[string]interface{})
-				if !ok {
-					castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
-				}
-
-				currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
-
-				v.validateObject(currentSubSchema, castCurrentNode, result, context)
-				v.validateCommon(currentSubSchema, castCurrentNode, result, context)
-
-				for _, pSchema := range currentSubSchema.propertiesChildren {
-					nextNode, ok := castCurrentNode[pSchema.property]
-					if ok {
-						subContext := newJsonContext(pSchema.property, context)
-						v.validateRecursive(pSchema, nextNode, result, subContext)
-					}
-				}
-
-			// Simple JSON values : string, number, boolean
-
-			case reflect.Bool:
-
-				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
-					result.addError(
-						new(InvalidTypeError),
-						context,
-						currentNode,
-						ErrorDetails{
-							"expected": currentSubSchema.types.String(),
-							"given": TYPE_BOOLEAN,
-						},
-					)
-					return
-				}
-
-				value := currentNode.(bool)
-
-				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
-				v.validateNumber(currentSubSchema, value, result, context)
-				v.validateCommon(currentSubSchema, value, result, context)
-				v.validateString(currentSubSchema, value, result, context)
-
-			case reflect.String:
-
-				if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
-					result.addError(
-						new(InvalidTypeError),
-						context,
-						currentNode,
-						ErrorDetails{
-							"expected": currentSubSchema.types.String(),
-							"given": TYPE_STRING,
-						},
-					)
-					return
-				}
-
-				value := currentNode.(string)
-
-				currentSubSchema.validateSchema(currentSubSchema, value, result, context)
-				v.validateNumber(currentSubSchema, value, result, context)
-				v.validateCommon(currentSubSchema, value, result, context)
-				v.validateString(currentSubSchema, value, result, context)
-
-			}
-
-		}
-
-	}
-
-	result.incrementScore()
-}
-
-// Different kinds of validation there, subSchema / common / array / object / string...
-func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
-
-	if internalLogEnabled {
-		internalLog("validateSchema %s", context.String())
-		internalLog(" %v", currentNode)
-	}
-
-	if len(currentSubSchema.anyOf) > 0 {
-
-		validatedAnyOf := false
-		var bestValidationResult *Result
-
-		for _, anyOfSchema := range currentSubSchema.anyOf {
-			if !validatedAnyOf {
-				validationResult := anyOfSchema.subValidateWithContext(currentNode, context)
-				validatedAnyOf = validationResult.Valid()
-
-				if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
-					bestValidationResult = validationResult
-				}
-			}
-		}
-		if !validatedAnyOf {
-
-			result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{})
-
-			if bestValidationResult != nil {
-				// add error messages of closest matching subSchema as
-				// that's probably the one the user was trying to match
-				result.mergeErrors(bestValidationResult)
-			}
-		}
-	}
-
-	if len(currentSubSchema.oneOf) > 0 {
-
-		nbValidated := 0
-		var bestValidationResult *Result
-
-		for _, oneOfSchema := range currentSubSchema.oneOf {
-			validationResult := oneOfSchema.subValidateWithContext(currentNode, context)
-			if validationResult.Valid() {
-				nbValidated++
-			} else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) {
-				bestValidationResult = validationResult
-			}
-		}
-
-		if nbValidated != 1 {
-
-			result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{})
-
-			if nbValidated == 0 {
-				// add error messages of closest matching subSchema as
-				// that's probably the one the user was trying to match
-				result.mergeErrors(bestValidationResult)
-			}
-		}
-
-	}
-
-	if len(currentSubSchema.allOf) > 0 {
-		nbValidated := 0
-
-		for _, allOfSchema := range currentSubSchema.allOf {
-			validationResult := allOfSchema.subValidateWithContext(currentNode, context)
-			if validationResult.Valid() {
-				nbValidated++
-			}
-			result.mergeErrors(validationResult)
-		}
-
-		if nbValidated != len(currentSubSchema.allOf) {
-			result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{})
-		}
-	}
-
-	if currentSubSchema.not != nil {
-		validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context)
-		if validationResult.Valid() {
-			result.addError(new(NumberNotError), context, currentNode, ErrorDetails{})
-		}
-	}
-
-	if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 {
-		if isKind(currentNode, reflect.Map) {
-			for elementKey := range currentNode.(map[string]interface{}) {
-				if dependency, ok := currentSubSchema.dependencies[elementKey]; ok {
-					switch dependency := dependency.(type) {
-
-					case []string:
-						for _, dependOnKey := range dependency {
-							if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved {
-								result.addError(
-									new(MissingDependencyError),
-									context,
-									currentNode,
-									ErrorDetails{"dependency": dependOnKey},
-								)
-							}
-						}
-
-					case *subSchema:
-						dependency.validateRecursive(dependency, currentNode, result, context)
-
-					}
-				}
-			}
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
-
-	if internalLogEnabled {
-		internalLog("validateCommon %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	// enum:
-	if len(currentSubSchema.enum) > 0 {
-		has, err := currentSubSchema.ContainsEnum(value)
-		if err != nil {
-			result.addError(new(InternalError), context, value, ErrorDetails{"error": err})
-		}
-		if !has {
-			result.addError(
-				new(EnumError),
-				context,
-				value,
-				ErrorDetails{
-					"allowed": strings.Join(currentSubSchema.enum, ", "),
-				},
-			)
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) {
-
-	if internalLogEnabled {
-		internalLog("validateArray %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	nbValues := len(value)
-
-	// TODO explain
-	if currentSubSchema.itemsChildrenIsSingleSchema {
-		for i := range value {
-			subContext := newJsonContext(strconv.Itoa(i), context)
-			validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext)
-			result.mergeErrors(validationResult)
-		}
-	} else {
-		if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 {
-
-			nbItems := len(currentSubSchema.itemsChildren)
-
-			// while we have both schemas and values, check them against each other
-			for i := 0; i != nbItems && i != nbValues; i++ {
-				subContext := newJsonContext(strconv.Itoa(i), context)
-				validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext)
-				result.mergeErrors(validationResult)
-			}
-
-			if nbItems < nbValues {
-				// we have less schemas than elements in the instance array,
-				// but that might be ok if "additionalItems" is specified.
-
-				switch currentSubSchema.additionalItems.(type) {
-				case bool:
-					if !currentSubSchema.additionalItems.(bool) {
-						result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{})
-					}
-				case *subSchema:
-					additionalItemSchema := currentSubSchema.additionalItems.(*subSchema)
-					for i := nbItems; i != nbValues; i++ {
-						subContext := newJsonContext(strconv.Itoa(i), context)
-						validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext)
-						result.mergeErrors(validationResult)
-					}
-				}
-			}
-		}
-	}
-
-	// minItems & maxItems
-	if currentSubSchema.minItems != nil {
-		if nbValues < int(*currentSubSchema.minItems) {
-			result.addError(
-				new(ArrayMinItemsError),
-				context,
-				value,
-				ErrorDetails{"min": *currentSubSchema.minItems},
-			)
-		}
-	}
-	if currentSubSchema.maxItems != nil {
-		if nbValues > int(*currentSubSchema.maxItems) {
-			result.addError(
-				new(ArrayMaxItemsError),
-				context,
-				value,
-				ErrorDetails{"max": *currentSubSchema.maxItems},
-			)
-		}
-	}
-
-	// uniqueItems:
-	if currentSubSchema.uniqueItems {
-		var stringifiedItems []string
-		for _, v := range value {
-			vString, err := marshalToJsonString(v)
-			if err != nil {
-				result.addError(new(InternalError), context, value, ErrorDetails{"err": err})
-			}
-			if isStringInSlice(stringifiedItems, *vString) {
-				result.addError(
-					new(ItemsMustBeUniqueError),
-					context,
-					value,
-					ErrorDetails{"type": TYPE_ARRAY},
-				)
-			}
-			stringifiedItems = append(stringifiedItems, *vString)
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) {
-
-	if internalLogEnabled {
-		internalLog("validateObject %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	// minProperties & maxProperties:
-	if currentSubSchema.minProperties != nil {
-		if len(value) < int(*currentSubSchema.minProperties) {
-			result.addError(
-				new(ArrayMinPropertiesError),
-				context,
-				value,
-				ErrorDetails{"min": *currentSubSchema.minProperties},
-			)
-		}
-	}
-	if currentSubSchema.maxProperties != nil {
-		if len(value) > int(*currentSubSchema.maxProperties) {
-			result.addError(
-				new(ArrayMaxPropertiesError),
-				context,
-				value,
-				ErrorDetails{"max": *currentSubSchema.maxProperties},
-			)
-		}
-	}
-
-	// required:
-	for _, requiredProperty := range currentSubSchema.required {
-		_, ok := value[requiredProperty]
-		if ok {
-			result.incrementScore()
-		} else {
-			result.addError(
-				new(RequiredError),
-				context,
-				value,
-				ErrorDetails{"property": requiredProperty},
-			)
-		}
-	}
-
-	// additionalProperty & patternProperty:
-	if currentSubSchema.additionalProperties != nil {
-
-		switch currentSubSchema.additionalProperties.(type) {
-		case bool:
-
-			if !currentSubSchema.additionalProperties.(bool) {
-
-				for pk := range value {
-
-					found := false
-					for _, spValue := range currentSubSchema.propertiesChildren {
-						if pk == spValue.property {
-							found = true
-						}
-					}
-
-					pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
-
-					if found {
-
-						if pp_has && !pp_match {
-							result.addError(
-								new(AdditionalPropertyNotAllowedError),
-								context,
-								value[pk],
-								ErrorDetails{"property": pk},
-							)
-						}
-
-					} else {
-
-						if !pp_has || !pp_match {
-							result.addError(
-								new(AdditionalPropertyNotAllowedError),
-								context,
-								value[pk],
-								ErrorDetails{"property": pk},
-							)
-						}
-
-					}
-				}
-			}
-
-		case *subSchema:
-
-			additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema)
-			for pk := range value {
-
-				found := false
-				for _, spValue := range currentSubSchema.propertiesChildren {
-					if pk == spValue.property {
-						found = true
-					}
-				}
-
-				pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
-
-				if found {
-
-					if pp_has && !pp_match {
-						validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
-						result.mergeErrors(validationResult)
-					}
-
-				} else {
-
-					if !pp_has || !pp_match {
-						validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context)
-						result.mergeErrors(validationResult)
-					}
-
-				}
-
-			}
-		}
-	} else {
-
-		for pk := range value {
-
-			pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context)
-
-			if pp_has && !pp_match {
-
-				result.addError(
-					new(InvalidPropertyPatternError),
-					context,
-					value[pk],
-					ErrorDetails{
-						"property": pk,
-						"pattern": currentSubSchema.PatternPropertiesString(),
-					},
-				)
-			}
-
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) {
-
-	if internalLogEnabled {
-		internalLog("validatePatternProperty %s", context.String())
-		internalLog(" %s %v", key, value)
-	}
-
-	has = false
-
-	validatedkey := false
-
-	for pk, pv := range currentSubSchema.patternProperties {
-		if matches, _ := regexp.MatchString(pk, key); matches {
-			has = true
-			subContext := newJsonContext(key, context)
-			validationResult := pv.subValidateWithContext(value, subContext)
-			result.mergeErrors(validationResult)
-			if validationResult.Valid() {
-				validatedkey = true
-			}
-		}
-	}
-
-	if !validatedkey {
-		return has, false
-	}
-
-	result.incrementScore()
-
-	return has, true
-}
-
-func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
-
-	// Ignore JSON numbers
-	if isJsonNumber(value) {
-		return
-	}
-
-	// Ignore non strings
-	if !isKind(value, reflect.String) {
-		return
-	}
-
-	if internalLogEnabled {
-		internalLog("validateString %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	stringValue := value.(string)
-
-	// minLength & maxLength:
-	if currentSubSchema.minLength != nil {
-		if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) {
-			result.addError(
-				new(StringLengthGTEError),
-				context,
-				value,
-				ErrorDetails{"min": *currentSubSchema.minLength},
-			)
-		}
-	}
-	if currentSubSchema.maxLength != nil {
-		if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) {
-			result.addError(
-				new(StringLengthLTEError),
-				context,
-				value,
-				ErrorDetails{"max": *currentSubSchema.maxLength},
-			)
-		}
-	}
-
-	// pattern:
-	if currentSubSchema.pattern != nil {
-		if !currentSubSchema.pattern.MatchString(stringValue) {
-			result.addError(
-				new(DoesNotMatchPatternError),
-				context,
-				value,
-				ErrorDetails{"pattern": currentSubSchema.pattern},
-			)
-
-		}
-	}
-
-	// format
-	if currentSubSchema.format != "" {
-		if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) {
-			result.addError(
-				new(DoesNotMatchFormatError),
-				context,
-				value,
-				ErrorDetails{"format": currentSubSchema.format},
-			)
-		}
-	}
-
-	result.incrementScore()
-}
-
-func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) {
-
-	// Ignore non numbers
-	if !isJsonNumber(value) {
-		return
-	}
-
-	if internalLogEnabled {
-		internalLog("validateNumber %s", context.String())
-		internalLog(" %v", value)
-	}
-
-	number := value.(json.Number)
-	float64Value, _ := number.Float64()
-
-	// multipleOf:
-	if currentSubSchema.multipleOf != nil {
-
-		if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) {
-			result.addError(
-				new(MultipleOfError),
-				context,
-				resultErrorFormatJsonNumber(number),
-				ErrorDetails{"multiple": *currentSubSchema.multipleOf},
-			)
-		}
-	}
-
-	//maximum & exclusiveMaximum:
-	if currentSubSchema.maximum != nil {
-		if currentSubSchema.exclusiveMaximum {
-			if float64Value >= *currentSubSchema.maximum {
-				result.addError(
-					new(NumberLTError),
-					context,
-					resultErrorFormatJsonNumber(number),
-					ErrorDetails{
-						"max": resultErrorFormatNumber(*currentSubSchema.maximum),
-					},
-				)
-			}
-		} else {
-			if float64Value > *currentSubSchema.maximum {
-				result.addError(
-					new(NumberLTEError),
-					context,
-					resultErrorFormatJsonNumber(number),
-					ErrorDetails{
-						"max": resultErrorFormatNumber(*currentSubSchema.maximum),
-					},
-				)
-			}
-		}
-	}
-
-	//minimum & exclusiveMinimum:
-	if currentSubSchema.minimum != nil {
-		if currentSubSchema.exclusiveMinimum {
-			if float64Value <= *currentSubSchema.minimum {
-				result.addError(
-					new(NumberGTError),
-					context,
-					resultErrorFormatJsonNumber(number),
-					ErrorDetails{
-						"min": resultErrorFormatNumber(*currentSubSchema.minimum),
-					},
-				)
-			}
-		} else {
-			if float64Value < *currentSubSchema.minimum {
-				result.addError(
-					new(NumberGTEError),
-					context,
-					resultErrorFormatJsonNumber(number),
-					ErrorDetails{
-						"min": resultErrorFormatNumber(*currentSubSchema.minimum),
-					},
-				)
-			}
-		}
-	}
-
-	result.incrementScore()
-}
diff --git a/website/data/providers.yaml b/website/data/providers.yaml
index fdb2c830a..48124ceac 100644
--- a/website/data/providers.yaml
+++ b/website/data/providers.yaml
@@ -10,8 +10,6 @@
   tag: cri
 - name: Huawei Cloud Container Instance (**CCI**)
   tag: huawei
-- name: Hyper.sh
-  tag: hypersh
 - name: HashiCorp Nomad
   tag: nomad
 - name: Service Fabric Mesh