From 8d0b843ae40c867dfa89ef7b891df372bf0b045a Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Fri, 19 Apr 2019 17:02:39 -0700 Subject: [PATCH] Refactor CLI initialization (#562) This cleans up the CLI code significantly. Also makes some of this re-usable for providers who want to do so. This also removes the main.go from the top of the tree of the repro, instead moving it into cmd/virtual-kubelet. This allows us to better utilize the package namespace (and e.g. mv the `vkubelet` package to the top of the tree). --- Gopkg.lock | 64 +- Gopkg.toml | 4 - Makefile | 11 +- cmd/client.go | 35 - cmd/providers.go | 55 - cmd/root.go | 404 ---- cmd/taint.go | 56 - cmd/version.go | 46 - .../commands/providers/provider.go | 54 + cmd/virtual-kubelet/commands/root/flag.go | 90 + .../commands/root}/http.go | 31 +- .../commands/root}/node.go | 57 +- cmd/virtual-kubelet/commands/root/opts.go | 143 ++ cmd/virtual-kubelet/commands/root/root.go | 204 ++ cmd/virtual-kubelet/commands/root/tracing.go | 102 + .../commands/version/version.go | 21 +- cmd/virtual-kubelet/main.go | 68 + .../opencensus/jaeger.go | 2 +- .../opencensus/ocagent.go | 2 +- .../opencensus/register.go | 34 +- .../opencensus/register_test.go | 2 +- vendor/github.com/fsnotify/fsnotify/AUTHORS | 52 - vendor/github.com/fsnotify/fsnotify/LICENSE | 28 - vendor/github.com/fsnotify/fsnotify/fen.go | 37 - .../github.com/fsnotify/fsnotify/fsnotify.go | 66 - .../github.com/fsnotify/fsnotify/inotify.go | 337 ---- .../fsnotify/fsnotify/inotify_poller.go | 187 -- vendor/github.com/fsnotify/fsnotify/kqueue.go | 521 ----- .../fsnotify/fsnotify/open_mode_bsd.go | 11 - .../fsnotify/fsnotify/open_mode_darwin.go | 12 - .../github.com/fsnotify/fsnotify/windows.go | 561 ------ .../hashicorp/hcl/hcl/printer/nodes.go | 789 -------- .../hashicorp/hcl/hcl/printer/printer.go | 66 - .../github.com/magiconair/properties/LICENSE | 25 - .../magiconair/properties/decode.go | 289 --- .../github.com/magiconair/properties/doc.go | 156 -- .../magiconair/properties/integrate.go | 34 - .../github.com/magiconair/properties/lex.go | 407 ---- .../github.com/magiconair/properties/load.go | 241 --- .../magiconair/properties/parser.go | 95 - .../magiconair/properties/properties.go | 822 -------- .../magiconair/properties/rangecheck.go | 31 - vendor/github.com/pelletier/go-toml/LICENSE | 21 - vendor/github.com/pelletier/go-toml/doc.go | 23 - vendor/github.com/pelletier/go-toml/fuzz.go | 31 - .../pelletier/go-toml/keysparsing.go | 85 - vendor/github.com/pelletier/go-toml/lexer.go | 750 ------- .../github.com/pelletier/go-toml/marshal.go | 600 ------ vendor/github.com/pelletier/go-toml/parser.go | 430 ---- .../github.com/pelletier/go-toml/position.go | 29 - vendor/github.com/pelletier/go-toml/token.go | 144 -- vendor/github.com/pelletier/go-toml/toml.go | 309 --- .../pelletier/go-toml/tomltree_create.go | 142 -- .../pelletier/go-toml/tomltree_write.go | 289 --- vendor/github.com/spf13/afero/LICENSE.txt | 174 -- vendor/github.com/spf13/afero/afero.go | 108 - vendor/github.com/spf13/afero/basepath.go | 180 -- .../github.com/spf13/afero/cacheOnReadFs.go | 290 --- vendor/github.com/spf13/afero/const_bsds.go | 22 - .../github.com/spf13/afero/const_win_unix.go | 25 - .../github.com/spf13/afero/copyOnWriteFs.go | 292 --- vendor/github.com/spf13/afero/httpFs.go | 110 - vendor/github.com/spf13/afero/ioutil.go | 230 --- vendor/github.com/spf13/afero/lstater.go | 27 - vendor/github.com/spf13/afero/match.go | 110 - vendor/github.com/spf13/afero/mem/dir.go | 37 - 
vendor/github.com/spf13/afero/mem/dirmap.go | 43 - vendor/github.com/spf13/afero/mem/file.go | 314 --- vendor/github.com/spf13/afero/memmap.go | 365 ---- vendor/github.com/spf13/afero/os.go | 101 - vendor/github.com/spf13/afero/path.go | 106 - vendor/github.com/spf13/afero/readonlyfs.go | 80 - vendor/github.com/spf13/afero/regexpfs.go | 214 -- vendor/github.com/spf13/afero/unionFile.go | 305 --- vendor/github.com/spf13/afero/util.go | 330 --- vendor/github.com/spf13/cast/LICENSE | 21 - vendor/github.com/spf13/cast/cast.go | 159 -- vendor/github.com/spf13/cast/caste.go | 1166 ----------- .../spf13/jwalterweatherman/LICENSE | 21 - .../jwalterweatherman/default_notepad.go | 113 -- .../spf13/jwalterweatherman/log_counter.go | 55 - .../spf13/jwalterweatherman/notepad.go | 194 -- vendor/github.com/spf13/viper/LICENSE | 21 - vendor/github.com/spf13/viper/flags.go | 57 - vendor/github.com/spf13/viper/util.go | 221 -- vendor/github.com/spf13/viper/viper.go | 1771 ----------------- 86 files changed, 765 insertions(+), 15602 deletions(-) delete mode 100644 cmd/client.go delete mode 100644 cmd/providers.go delete mode 100644 cmd/root.go delete mode 100644 cmd/taint.go delete mode 100644 cmd/version.go create mode 100644 cmd/virtual-kubelet/commands/providers/provider.go create mode 100644 cmd/virtual-kubelet/commands/root/flag.go rename cmd/{ => virtual-kubelet/commands/root}/http.go (76%) rename cmd/{ => virtual-kubelet/commands/root}/node.go (51%) create mode 100644 cmd/virtual-kubelet/commands/root/opts.go create mode 100644 cmd/virtual-kubelet/commands/root/root.go create mode 100644 cmd/virtual-kubelet/commands/root/tracing.go rename main.go => cmd/virtual-kubelet/commands/version/version.go (56%) create mode 100644 cmd/virtual-kubelet/main.go rename cmd/census_jaeger.go => trace/opencensus/jaeger.go (97%) rename cmd/census_ocagent.go => trace/opencensus/ocagent.go (98%) rename cmd/census_tracing.go => trace/opencensus/register.go (71%) rename cmd/census_test.go => trace/opencensus/register_test.go (98%) delete mode 100644 vendor/github.com/fsnotify/fsnotify/AUTHORS delete mode 100644 vendor/github.com/fsnotify/fsnotify/LICENSE delete mode 100644 vendor/github.com/fsnotify/fsnotify/fen.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/fsnotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/inotify_poller.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/kqueue.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/windows.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go delete mode 100644 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go delete mode 100644 vendor/github.com/magiconair/properties/LICENSE delete mode 100644 vendor/github.com/magiconair/properties/decode.go delete mode 100644 vendor/github.com/magiconair/properties/doc.go delete mode 100644 vendor/github.com/magiconair/properties/integrate.go delete mode 100644 vendor/github.com/magiconair/properties/lex.go delete mode 100644 vendor/github.com/magiconair/properties/load.go delete mode 100644 vendor/github.com/magiconair/properties/parser.go delete mode 100644 vendor/github.com/magiconair/properties/properties.go delete mode 100644 vendor/github.com/magiconair/properties/rangecheck.go delete mode 100644 vendor/github.com/pelletier/go-toml/LICENSE delete mode 100644 
vendor/github.com/pelletier/go-toml/doc.go delete mode 100644 vendor/github.com/pelletier/go-toml/fuzz.go delete mode 100644 vendor/github.com/pelletier/go-toml/keysparsing.go delete mode 100644 vendor/github.com/pelletier/go-toml/lexer.go delete mode 100644 vendor/github.com/pelletier/go-toml/marshal.go delete mode 100644 vendor/github.com/pelletier/go-toml/parser.go delete mode 100644 vendor/github.com/pelletier/go-toml/position.go delete mode 100644 vendor/github.com/pelletier/go-toml/token.go delete mode 100644 vendor/github.com/pelletier/go-toml/toml.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_create.go delete mode 100644 vendor/github.com/pelletier/go-toml/tomltree_write.go delete mode 100644 vendor/github.com/spf13/afero/LICENSE.txt delete mode 100644 vendor/github.com/spf13/afero/afero.go delete mode 100644 vendor/github.com/spf13/afero/basepath.go delete mode 100644 vendor/github.com/spf13/afero/cacheOnReadFs.go delete mode 100644 vendor/github.com/spf13/afero/const_bsds.go delete mode 100644 vendor/github.com/spf13/afero/const_win_unix.go delete mode 100644 vendor/github.com/spf13/afero/copyOnWriteFs.go delete mode 100644 vendor/github.com/spf13/afero/httpFs.go delete mode 100644 vendor/github.com/spf13/afero/ioutil.go delete mode 100644 vendor/github.com/spf13/afero/lstater.go delete mode 100644 vendor/github.com/spf13/afero/match.go delete mode 100644 vendor/github.com/spf13/afero/mem/dir.go delete mode 100644 vendor/github.com/spf13/afero/mem/dirmap.go delete mode 100644 vendor/github.com/spf13/afero/mem/file.go delete mode 100644 vendor/github.com/spf13/afero/memmap.go delete mode 100644 vendor/github.com/spf13/afero/os.go delete mode 100644 vendor/github.com/spf13/afero/path.go delete mode 100644 vendor/github.com/spf13/afero/readonlyfs.go delete mode 100644 vendor/github.com/spf13/afero/regexpfs.go delete mode 100644 vendor/github.com/spf13/afero/unionFile.go delete mode 100644 vendor/github.com/spf13/afero/util.go delete mode 100644 vendor/github.com/spf13/cast/LICENSE delete mode 100644 vendor/github.com/spf13/cast/cast.go delete mode 100644 vendor/github.com/spf13/cast/caste.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/LICENSE delete mode 100644 vendor/github.com/spf13/jwalterweatherman/default_notepad.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/log_counter.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/notepad.go delete mode 100644 vendor/github.com/spf13/viper/LICENSE delete mode 100644 vendor/github.com/spf13/viper/flags.go delete mode 100644 vendor/github.com/spf13/viper/util.go delete mode 100644 vendor/github.com/spf13/viper/viper.go diff --git a/Gopkg.lock b/Gopkg.lock index a90fed758..96bb9d92f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -347,14 +347,6 @@ revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5" version = "v4.1.0" -[[projects]] - digest = "1:1b91ae0dc69a41d4c2ed23ea5cffb721ea63f5037ca4b81e6d6771fbb8f45129" - name = "github.com/fsnotify/fsnotify" - packages = ["."] - pruneopts = "NUT" - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - [[projects]] digest = "1:e05711632e1515319b014e8fe4cbe1d30ab024c473403f60cf0fdeb4c586a474" name = "github.com/go-ini/ini" @@ -707,13 +699,12 @@ [[projects]] branch = "master" - digest = "1:11c6c696067d3127ecf332b10f89394d386d9083f82baf71f40f2da31841a009" + digest = "1:39f543569bf189e228c84a294c50aca8ea56c82b3d9df5c9b788249907d7049a" name = "github.com/hashicorp/hcl" packages = [ ".", "hcl/ast", "hcl/parser", - 
"hcl/printer", "hcl/scanner", "hcl/strconv", "hcl/token", @@ -831,14 +822,6 @@ revision = "9a9182c0c682798fe214fa11df7bfca83048d433" version = "v0.5.1" -[[projects]] - digest = "1:35e5598ba3bc950039e13d91958ce37f675264735907cada569d11b243e5cacb" - name = "github.com/magiconair/properties" - packages = ["."] - pruneopts = "NUT" - revision = "c3beff4c2358b44d0493c7dda585e7db7ff28ae6" - version = "v1.7.6" - [[projects]] branch = "master" digest = "1:ada518b8c338e10e0afa443d84671476d3bd1d926e13713938088e8ddbee1a3e" @@ -943,14 +926,6 @@ revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53" version = "v1.1" -[[projects]] - digest = "1:13b8f1a2ce177961dc9231606a52f709fab896c565f3988f60a7f6b4e543a902" - name = "github.com/pelletier/go-toml" - packages = ["."] - pruneopts = "NUT" - revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8" - version = "v1.1.0" - [[projects]] branch = "master" digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2" @@ -1010,25 +985,6 @@ revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" version = "v1.2.0" -[[projects]] - digest = "1:35f36ea322654e3d0932e58ad26556998260b9fa9e691471f756d173684eac0a" - name = "github.com/spf13/afero" - packages = [ - ".", - "mem", - ] - pruneopts = "NUT" - revision = "63644898a8da0bc22138abf860edaf5277b6102e" - version = "v1.1.0" - -[[projects]] - digest = "1:3fa7947ca83b98ae553590d993886e845a4bff19b7b007e869c6e0dd3b9da9cd" - name = "github.com/spf13/cast" - packages = ["."] - pruneopts = "NUT" - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" - [[projects]] digest = "1:0f156dbd01b40676bdcbc64e51535c09b50f83c9cca5faef3090f82f18bda3c2" name = "github.com/spf13/cobra" @@ -1037,14 +993,6 @@ revision = "a1f051bc3eba734da4772d60e2d677f47cf93ef4" version = "v0.0.2" -[[projects]] - branch = "master" - digest = "1:f29f83301ed096daed24a90f4af591b7560cb14b9cc3e1827abbf04db7269ab5" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - pruneopts = "NUT" - revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" - [[projects]] digest = "1:15e5c398fbd9d2c439b635a08ac161b13d04f0c2aa587fe256b65dc0c3efe8b7" name = "github.com/spf13/pflag" @@ -1053,14 +1001,6 @@ revision = "583c0c0531f06d5278b7d917446061adc344b5cd" version = "v1.0.1" -[[projects]] - digest = "1:ea67fb4941c0a1a92f828e73cf426533c71db02df45a2cdf55a14c3e7b74c07a" - name = "github.com/spf13/viper" - packages = ["."] - pruneopts = "NUT" - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" - [[projects]] digest = "1:10301358a67805684f6b525cba6ad7ec014dbd56cccc2926fadc9189faa7889a" name = "github.com/stretchr/objx" @@ -1840,7 +1780,7 @@ "github.com/mitchellh/go-homedir", "github.com/pkg/errors", "github.com/spf13/cobra", - "github.com/spf13/viper", + "github.com/spf13/pflag", "github.com/stretchr/testify/mock", "github.com/virtual-kubelet/azure-aci/client", "github.com/virtual-kubelet/azure-aci/client/aci", diff --git a/Gopkg.toml b/Gopkg.toml index 9dd52365d..74da40cb3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -33,10 +33,6 @@ name = "github.com/spf13/cobra" version = "0.0.1" -[[constraint]] - name = "github.com/spf13/viper" - version = "1.0.0" - [[constraint]] name = "github.com/google/uuid" version = "0.2.0" diff --git a/Makefile b/Makefile index 478591b4b..a285b0579 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,4 @@ SHELL := /bin/bash -IMPORT_PATH := github.com/virtual-kubelet/virtual-kubelet DOCKER_IMAGE := virtual-kubelet exec := $(DOCKER_IMAGE) @@ -28,7 +27,7 @@ build: build_tags := netgo osusergo build: 
OUTPUT_DIR ?= bin build: authors @echo "Building..." - $Q CGO_ENABLED=0 go build -a --tags '$(shell scripts/process_build_tags.sh $(build_tags) $(VK_BUILD_TAGS))' -ldflags '-extldflags "-static"' -o $(OUTPUT_DIR)/$(binary) $(if $V,-v) $(VERSION_FLAGS) $(IMPORT_PATH) + $Q CGO_ENABLED=0 go build -a --tags '$(shell scripts/process_build_tags.sh $(build_tags) $(VK_BUILD_TAGS))' -ldflags '-extldflags "-static"' -o $(OUTPUT_DIR)/$(binary) $(if $V,-v) $(VERSION_FLAGS) ./cmd/$(binary) .PHONY: tags tags: @@ -39,14 +38,6 @@ tags: release: build $(GOPATH)/bin/goreleaser goreleaser - -### Code not in the repository root? Another binary? Add to the path like this. -# .PHONY: otherbin -# otherbin: .GOPATH/.ok -# $Q go install $(if $V,-v) $(VERSION_FLAGS) $(IMPORT_PATH)/cmd/otherbin - -##### ^^^^^^ EDIT ABOVE ^^^^^^ ##### - ##### =====> Utility targets <===== ##### .PHONY: clean test list cover format docker deps diff --git a/cmd/client.go b/cmd/client.go deleted file mode 100644 index d0921572e..000000000 --- a/cmd/client.go +++ /dev/null @@ -1,35 +0,0 @@ -package cmd - -import ( - "os" - - "github.com/pkg/errors" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -func newClient(configPath string) (*kubernetes.Clientset, error) { - var config *rest.Config - - // Check if the kubeConfig file exists. - if _, err := os.Stat(configPath); !os.IsNotExist(err) { - // Get the kubeconfig from the filepath. - config, err = clientcmd.BuildConfigFromFlags("", configPath) - if err != nil { - return nil, errors.Wrap(err, "error building client config") - } - } else { - // Set to in-cluster config. - config, err = rest.InClusterConfig() - if err != nil { - return nil, errors.Wrap(err, "error building in cluster config") - } - } - - if masterURI := os.Getenv("MASTER_URI"); masterURI != "" { - config.Host = masterURI - } - - return kubernetes.NewForConfig(config) -} diff --git a/cmd/providers.go b/cmd/providers.go deleted file mode 100644 index 29fed9ab2..000000000 --- a/cmd/providers.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2017 NAME HERE -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cmd - -import ( - "fmt" - "os" - - "github.com/spf13/cobra" - "github.com/virtual-kubelet/virtual-kubelet/providers/register" -) - -// versionCmd represents the version command -var providersCmd = &cobra.Command{ - Use: "providers", - Short: "Show the list of supported providers", - Long: "Show the list of supported providers", - Args: cobra.MaximumNArgs(2), - Run: func(cmd *cobra.Command, args []string) { - switch len(args) { - case 0: - ls := register.List() - for _, p := range ls { - fmt.Fprintln(cmd.OutOrStdout(), p) - } - case 1: - if !register.Exists(args[0]) { - fmt.Fprintln(cmd.OutOrStderr(), "no such provider", args[0]) - - // TODO(@cpuuy83): would be nice to not short-circuit the exit here - // But at the momemt this seems to be the only way to exit non-zero and - // handle our own error output - os.Exit(1) - } - fmt.Fprintln(cmd.OutOrStdout(), args[0]) - } - return - }, -} - -func init() { - RootCmd.AddCommand(providersCmd) -} diff --git a/cmd/root.go b/cmd/root.go deleted file mode 100644 index cac569f80..000000000 --- a/cmd/root.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright © 2017 The virtual-kubelet authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "context" - "fmt" - "os" - "os/signal" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/mitchellh/go-homedir" - "github.com/pkg/errors" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "github.com/virtual-kubelet/virtual-kubelet/log" - logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus" - "github.com/virtual-kubelet/virtual-kubelet/manager" - "github.com/virtual-kubelet/virtual-kubelet/providers" - "github.com/virtual-kubelet/virtual-kubelet/providers/register" - "github.com/virtual-kubelet/virtual-kubelet/trace" - "github.com/virtual-kubelet/virtual-kubelet/trace/opencensus" - "github.com/virtual-kubelet/virtual-kubelet/vkubelet" - octrace "go.opencensus.io/trace" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - kubeinformers "k8s.io/client-go/informers" - corev1informers "k8s.io/client-go/informers/core/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - defaultDaemonPort = "10250" - // kubeSharedInformerFactoryDefaultResync is the default resync period used by the shared informer factories for Kubernetes resources. - // It is set to the same value used by the Kubelet, and can be overridden via the "--full-resync-period" flag. 
- // https://github.com/kubernetes/kubernetes/blob/v1.12.2/pkg/kubelet/apis/config/v1beta1/defaults.go#L51 - kubeSharedInformerFactoryDefaultResync = 1 * time.Minute -) - -var kubeletConfig string -var kubeConfig string -var kubeNamespace string -var nodeName string -var operatingSystem string -var provider string -var providerConfig string -var taintKey string -var disableTaint bool -var logLevel string -var metricsAddr string -var taint *corev1.Taint -var k8sClient *kubernetes.Clientset -var p providers.Provider -var rm *manager.ResourceManager -var apiConfig *apiServerConfig -var podInformer corev1informers.PodInformer -var kubeSharedInformerFactoryResync time.Duration -var podSyncWorkers int -var enableNodeLease bool - -var userTraceExporters []string -var userTraceConfig = TracingExporterOptions{Tags: make(map[string]string)} -var traceSampler string - -// Create a root context to be used by the pod controller and by the shared informer factories. -var rootContext, rootContextCancel = context.WithCancel(context.Background()) - -// RootCmd represents the base command when called without any subcommands -var RootCmd = &cobra.Command{ - Use: "virtual-kubelet", - Short: "virtual-kubelet provides a virtual kubelet interface for your kubernetes cluster.", - Long: `virtual-kubelet implements the Kubelet interface with a pluggable -backend implementation allowing users to create kubernetes nodes without running the kubelet. -This allows users to schedule kubernetes workloads on nodes that aren't running Kubernetes.`, - Run: func(cmd *cobra.Command, args []string) { - initConfig() - - defer rootContextCancel() - - pNode := NodeFromProvider(rootContext, nodeName, taint, p) - node, err := vkubelet.NewNode( - vkubelet.NaiveNodeProvider{}, - pNode, - k8sClient.Coordination().Leases(corev1.NamespaceNodeLease), - k8sClient.CoreV1().Nodes(), - vkubelet.WithNodeDisableLease(!enableNodeLease), - ) - if err != nil { - log.G(rootContext).Fatal(err) - } - - vk := vkubelet.New(vkubelet.Config{ - Client: k8sClient, - Namespace: kubeNamespace, - NodeName: pNode.Name, - Provider: p, - ResourceManager: rm, - PodSyncWorkers: podSyncWorkers, - PodInformer: podInformer, - }) - - sig := make(chan os.Signal, 1) - signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) - go func() { - <-sig - rootContextCancel() - }() - - cancelHTTP, err := setupHTTPServer(rootContext, apiConfig) - if err != nil { - log.G(rootContext).Fatal(err) - } - defer cancelHTTP() - - go func() { - if err := vk.Run(rootContext); err != nil && errors.Cause(err) != context.Canceled { - log.G(rootContext).Fatal(err) - } - }() - - go func() { - if err := node.Run(rootContext); err != nil { - log.G(rootContext).Fatal(err) - } - }() - <-rootContext.Done() - }, -} - -// Execute adds all child commands to the root command and sets flags appropriately. -// This is called by main.main(). It only needs to happen once to the rootCmd. 
-func Execute() { - if err := RootCmd.Execute(); err != nil { - log.GetLogger(context.TODO()).WithError(err).Fatal("Error executing root command") - } -} - -type mapVar map[string]string - -func (mv mapVar) String() string { - var s string - for k, v := range mv { - if s == "" { - s = fmt.Sprintf("%s=%v", k, v) - } else { - s += fmt.Sprintf(", %s=%v", k, v) - } - } - return s -} - -func (mv mapVar) Set(s string) error { - split := strings.SplitN(s, "=", 2) - if len(split) != 2 { - return errors.Errorf("invalid format, must be `key=value`: %s", s) - } - - _, ok := mv[split[0]] - if ok { - return errors.Errorf("duplicate key: %s", split[0]) - } - mv[split[0]] = split[1] - return nil -} - -func (mv mapVar) Type() string { - return "map" -} - -func init() { - // make sure the default logger/tracer is initialized - log.L = logruslogger.FromLogrus(logrus.NewEntry(logrus.StandardLogger())) - trace.T = opencensus.Adapter{} - - // read default node name from environment variable. - // it can be overwritten by cli flags if specified. - defaultNodeName := os.Getenv("DEFAULT_NODE_NAME") - if defaultNodeName == "" { - defaultNodeName = "virtual-kubelet" - } - // Here you will define your flags and configuration settings. - // Cobra supports persistent flags, which, if defined here, - // will be global for your application. - //RootCmd.PersistentFlags().StringVar(&kubeletConfig, "config", "", "config file (default is $HOME/.virtual-kubelet.yaml)") - RootCmd.PersistentFlags().StringVar(&kubeConfig, "kubeconfig", "", "config file (default is $HOME/.kube/config)") - RootCmd.PersistentFlags().StringVar(&kubeNamespace, "namespace", "", "kubernetes namespace (default is 'all')") - RootCmd.PersistentFlags().StringVar(&nodeName, "nodename", defaultNodeName, "kubernetes node name") - RootCmd.PersistentFlags().StringVar(&operatingSystem, "os", "Linux", "Operating System (Linux/Windows)") - RootCmd.PersistentFlags().StringVar(&provider, "provider", "", "cloud provider") - RootCmd.PersistentFlags().BoolVar(&disableTaint, "disable-taint", false, "disable the virtual-kubelet node taint") - RootCmd.PersistentFlags().StringVar(&providerConfig, "provider-config", "", "cloud provider configuration file") - RootCmd.PersistentFlags().StringVar(&metricsAddr, "metrics-addr", ":10255", "address to listen for metrics/stats requests") - - RootCmd.PersistentFlags().StringVar(&taintKey, "taint", "", "Set node taint key") - RootCmd.PersistentFlags().MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") - RootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", `set the log level, e.g. 
"trace", debug", "info", "warn", "error"`) - RootCmd.PersistentFlags().IntVar(&podSyncWorkers, "pod-sync-workers", 10, `set the number of pod synchronization workers`) - RootCmd.PersistentFlags().BoolVar(&enableNodeLease, "enable-node-lease", false, `use node leases (1.13) for node heartbeats`) - - RootCmd.PersistentFlags().StringSliceVar(&userTraceExporters, "trace-exporter", nil, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters())) - RootCmd.PersistentFlags().StringVar(&userTraceConfig.ServiceName, "trace-service-name", "virtual-kubelet", "sets the name of the service used to register with the trace exporter") - RootCmd.PersistentFlags().Var(mapVar(userTraceConfig.Tags), "trace-tag", "add tags to include with traces in key=value form") - RootCmd.PersistentFlags().StringVar(&traceSampler, "trace-sample-rate", "", "set probability of tracing samples") - - RootCmd.PersistentFlags().DurationVar(&kubeSharedInformerFactoryResync, "full-resync-period", kubeSharedInformerFactoryDefaultResync, "how often to perform a full resync of pods between kubernetes and the provider") - - // Cobra also supports local flags, which will only run - // when this action is called directly. - // RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") -} - -// initConfig reads in config file and ENV variables if set. -func initConfig() { - if provider == "" { - log.G(context.TODO()).Fatal("You must supply a cloud provider option: use --provider") - } - - // Find home directory. - home, err := homedir.Dir() - if err != nil { - log.G(context.TODO()).WithError(err).Fatal("Error reading homedir") - } - - if kubeletConfig != "" { - // Use config file from the flag. - viper.SetConfigFile(kubeletConfig) - } else { - // Search config in home directory with name ".virtual-kubelet" (without extension). - viper.AddConfigPath(home) - viper.SetConfigName(".virtual-kubelet") - } - - viper.AutomaticEnv() // read in environment variables that match - - // If a config file is found, read it in. - if err := viper.ReadInConfig(); err == nil { - log.G(context.TODO()).Debugf("Using config file %s", viper.ConfigFileUsed()) - } - - if kubeConfig == "" { - kubeConfig = filepath.Join(home, ".kube", "config") - - } - - if kubeNamespace == "" { - kubeNamespace = corev1.NamespaceAll - } - - // Validate operating system. - ok, _ := providers.ValidOperatingSystems[operatingSystem] - if !ok { - log.G(context.TODO()).WithField("OperatingSystem", operatingSystem).Fatalf("Operating system not supported. Valid options are: %s", strings.Join(providers.ValidOperatingSystems.Names(), " | ")) - } - - level, err := logrus.ParseLevel(logLevel) - if err != nil { - log.G(context.TODO()).WithField("logLevel", logLevel).Fatal("log level is not supported") - } - - logrus.SetLevel(level) - logger := logruslogger.FromLogrus(logrus.WithFields(logrus.Fields{ - "provider": provider, - "operatingSystem": operatingSystem, - "node": nodeName, - "namespace": kubeNamespace, - })) - - rootContext = log.WithLogger(rootContext, logger) - - log.L = logger - - if !disableTaint { - taint, err = getTaint(taintKey, provider) - if err != nil { - logger.WithError(err).Fatal("Error setting up desired kubernetes node taint") - } - } - - k8sClient, err = newClient(kubeConfig) - if err != nil { - logger.WithError(err).Fatal("Error creating kubernetes client") - } - - // Create a shared informer factory for Kubernetes pods in the current namespace (if specified) and scheduled to the current node. 
- podInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(k8sClient, kubeSharedInformerFactoryResync, kubeinformers.WithNamespace(kubeNamespace), kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) { - options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", nodeName).String() - })) - // Create a pod informer so we can pass its lister to the resource manager. - podInformer = podInformerFactory.Core().V1().Pods() - - // Create another shared informer factory for Kubernetes secrets and configmaps (not subject to any selectors). - scmInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(k8sClient, kubeSharedInformerFactoryResync) - // Create a secret informer and a config map informer so we can pass their listers to the resource manager. - secretInformer := scmInformerFactory.Core().V1().Secrets() - configMapInformer := scmInformerFactory.Core().V1().ConfigMaps() - - // Create a service informer so we can pass its lister to the resource manager. - serviceInformer := scmInformerFactory.Core().V1().Services() - - // Create a new instance of the resource manager that uses the listers above for pods, secrets and config maps. - rm, err = manager.NewResourceManager(podInformer.Lister(), secretInformer.Lister(), configMapInformer.Lister(), serviceInformer.Lister()) - if err != nil { - logger.WithError(err).Fatal("Error initializing resource manager") - } - - // Start the shared informer factory for pods. - go podInformerFactory.Start(rootContext.Done()) - // Start the shared informer factory for secrets and configmaps. - go scmInformerFactory.Start(rootContext.Done()) - - daemonPortEnv := getEnv("KUBELET_PORT", defaultDaemonPort) - daemonPort, err := strconv.ParseInt(daemonPortEnv, 10, 32) - if err != nil { - logger.WithError(err).WithField("value", daemonPortEnv).Fatal("Invalid value from KUBELET_PORT in environment") - } - - initConfig := register.InitConfig{ - ConfigPath: providerConfig, - NodeName: nodeName, - OperatingSystem: operatingSystem, - ResourceManager: rm, - DaemonPort: int32(daemonPort), - InternalIP: os.Getenv("VKUBELET_POD_IP"), - } - - p, err = register.GetProvider(provider, initConfig) - if err != nil { - logger.WithError(err).Fatal("Error initializing provider") - } - - apiConfig, err = getAPIConfig(metricsAddr) - if err != nil { - logger.WithError(err).Fatal("Error reading API config") - } - - if podSyncWorkers <= 0 { - logger.Fatal("The number of pod synchronization workers should not be negative") - } - - for k := range userTraceConfig.Tags { - if reservedTagNames[k] { - logger.WithField("tag", k).Fatal("must not use a reserved tag key") - } - } - userTraceConfig.Tags["operatingSystem"] = operatingSystem - userTraceConfig.Tags["provider"] = provider - userTraceConfig.Tags["nodeName"] = nodeName - for _, e := range userTraceExporters { - if e == "zpages" { - go setupZpages() - continue - } - exporter, err := GetTracingExporter(e, userTraceConfig) - if err != nil { - log.L.WithError(err).WithField("exporter", e).Fatal("Cannot initialize exporter") - } - octrace.RegisterExporter(exporter) - } - if len(userTraceExporters) > 0 { - var s octrace.Sampler - switch strings.ToLower(traceSampler) { - case "": - case "always": - s = octrace.AlwaysSample() - case "never": - s = octrace.NeverSample() - default: - rate, err := strconv.Atoi(traceSampler) - if err != nil { - logger.WithError(err).WithField("rate", traceSampler).Fatal("unsupported trace sample rate, supported values: always, never, or number 0-100") - } - if rate < 0 
|| rate > 100 { - logger.WithField("rate", traceSampler).Fatal("trace sample rate must not be less than zero or greater than 100") - } - s = octrace.ProbabilitySampler(float64(rate) / 100) - } - - if s != nil { - octrace.ApplyConfig( - octrace.Config{ - DefaultSampler: s, - }, - ) - } - } -} diff --git a/cmd/taint.go b/cmd/taint.go deleted file mode 100644 index fd5a47db3..000000000 --- a/cmd/taint.go +++ /dev/null @@ -1,56 +0,0 @@ -package cmd - -import ( - "os" - - "github.com/cpuguy83/strongerrors" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" -) - -// Default taint values -const ( - DefaultTaintEffect = corev1.TaintEffectNoSchedule - DefaultTaintKey = "virtual-kubelet.io/provider" -) - -func getEnv(key, defaultValue string) string { - value, found := os.LookupEnv(key) - if found { - return value - } - return defaultValue -} - -// getTaint creates a taint using the provided key/value. -// Taint effect is read from the environment -// The taint key/value may be overwritten by the environment. -func getTaint(key, value string) (*corev1.Taint, error) { - if key == "" { - key = DefaultTaintKey - value = provider - } - - key = getEnv("VKUBELET_TAINT_KEY", key) - value = getEnv("VKUBELET_TAINT_VALUE", value) - effectEnv := getEnv("VKUBELET_TAINT_EFFECT", string(DefaultTaintEffect)) - - var effect corev1.TaintEffect - switch effectEnv { - case "NoSchedule": - effect = corev1.TaintEffectNoSchedule - case "NoExecute": - effect = corev1.TaintEffectNoExecute - case "PreferNoSchedule": - effect = corev1.TaintEffectPreferNoSchedule - default: - return nil, strongerrors.InvalidArgument(errors.Errorf("taint effect %q is not supported", effectEnv)) - } - - return &corev1.Taint{ - Key: key, - Value: value, - Effect: effect, - }, nil -} diff --git a/cmd/version.go b/cmd/version.go deleted file mode 100644 index 20545e18b..000000000 --- a/cmd/version.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright © 2017 NAME HERE -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package cmd - -import ( - "fmt" - - "github.com/spf13/cobra" - "github.com/virtual-kubelet/virtual-kubelet/version" -) - -// versionCmd represents the version command -var versionCmd = &cobra.Command{ - Use: "version", - Short: "Show the version of the program", - Long: `Show the version of the program`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Version: %s, Built: %s\n", version.Version, version.BuildTime) - }, -} - -func init() { - RootCmd.AddCommand(versionCmd) - - // Here you will define your flags and configuration settings. 
- - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // versionCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // versionCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") -} diff --git a/cmd/virtual-kubelet/commands/providers/provider.go b/cmd/virtual-kubelet/commands/providers/provider.go new file mode 100644 index 000000000..8da5b67c0 --- /dev/null +++ b/cmd/virtual-kubelet/commands/providers/provider.go @@ -0,0 +1,54 @@ +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package providers + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/virtual-kubelet/virtual-kubelet/providers/register" +) + +// NewCommand creates a new providers subcommand +// This subcommand is used to determine which providers are registered. +func NewCommand() *cobra.Command { + return &cobra.Command{ + Use: "providers", + Short: "Show the list of supported providers", + Long: "Show the list of supported providers", + Args: cobra.MaximumNArgs(2), + Run: func(cmd *cobra.Command, args []string) { + switch len(args) { + case 0: + ls := register.List() + for _, p := range ls { + fmt.Fprintln(cmd.OutOrStdout(), p) + } + case 1: + if !register.Exists(args[0]) { + fmt.Fprintln(cmd.OutOrStderr(), "no such provider", args[0]) + + // TODO(@cpuuy83): would be nice to not short-circuit the exit here + // But at the momemt this seems to be the only way to exit non-zero and + // handle our own error output + os.Exit(1) + } + fmt.Fprintln(cmd.OutOrStdout(), args[0]) + } + return + }, + } +} diff --git a/cmd/virtual-kubelet/commands/root/flag.go b/cmd/virtual-kubelet/commands/root/flag.go new file mode 100644 index 000000000..3a5bc74ee --- /dev/null +++ b/cmd/virtual-kubelet/commands/root/flag.go @@ -0,0 +1,90 @@ +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
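The providers subcommand is now built by a NewCommand constructor instead of registering itself on a package-level root command, so callers decide where to attach it. A minimal sketch of driving it directly from a scratch program; the "azure" argument is only an illustrative provider name, not something this patch registers:

    package main

    import (
        "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/providers"
    )

    func main() {
        cmd := providers.NewCommand()
        // No args: print every registered provider.
        // One arg: exit non-zero unless that provider is registered.
        cmd.SetArgs([]string{"azure"})
        _ = cmd.Execute()
    }
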
+ +package root + +import ( + "fmt" + "os" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/pflag" + "github.com/virtual-kubelet/virtual-kubelet/trace/opencensus" +) + +type mapVar map[string]string + +func (mv mapVar) String() string { + var s string + for k, v := range mv { + if s == "" { + s = fmt.Sprintf("%s=%v", k, v) + } else { + s += fmt.Sprintf(", %s=%v", k, v) + } + } + return s +} + +func (mv mapVar) Set(s string) error { + split := strings.SplitN(s, "=", 2) + if len(split) != 2 { + return errors.Errorf("invalid format, must be `key=value`: %s", s) + } + + _, ok := mv[split[0]] + if ok { + return errors.Errorf("duplicate key: %s", split[0]) + } + mv[split[0]] = split[1] + return nil +} + +func (mv mapVar) Type() string { + return "map" +} + +func installFlags(flags *pflag.FlagSet, c *Opts) { + flags.StringVar(&c.KubeConfigPath, "kubeconfig", c.KubeConfigPath, "kube config file to use for connecting to the Kubernetes API server") + flags.StringVar(&c.KubeNamespace, "namespace", c.KubeNamespace, "kubernetes namespace (default is 'all')") + flags.StringVar(&c.NodeName, "nodename", c.NodeName, "kubernetes node name") + flags.StringVar(&c.OperatingSystem, "os", c.OperatingSystem, "Operating System (Linux/Windows)") + flags.StringVar(&c.Provider, "provider", c.Provider, "cloud provider") + flags.StringVar(&c.ProviderConfigPath, "provider-config", c.ProviderConfigPath, "cloud provider configuration file") + flags.StringVar(&c.MetricsAddr, "metrics-addr", c.MetricsAddr, "address to listen for metrics/stats requests") + + flags.StringVar(&c.TaintKey, "taint", c.TaintKey, "Set node taint key") + flags.BoolVar(&c.DisableTaint, "disable-taint", c.DisableTaint, "disable the virtual-kubelet node taint") + flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") + + flags.IntVar(&c.PodSyncWorkers, "pod-sync-workers", c.PodSyncWorkers, `set the number of pod synchronization workers`) + flags.BoolVar(&c.EnableNodeLease, "enable-node-lease", c.EnableNodeLease, `use node leases (1.13) for node heartbeats`) + + flags.StringSliceVar(&c.TraceExporters, "trace-exporter", c.TraceExporters, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", opencensus.AvailableTraceExporters())) + flags.StringVar(&c.TraceConfig.ServiceName, "trace-service-name", c.TraceConfig.ServiceName, "sets the name of the service used to register with the trace exporter") + flags.Var(mapVar(c.TraceConfig.Tags), "trace-tag", "add tags to include with traces in key=value form") + flags.StringVar(&c.TraceSampleRate, "trace-sample-rate", c.TraceSampleRate, "set probability of tracing samples") + + flags.DurationVar(&c.InformerResyncPeriod, "full-resync-period", c.InformerResyncPeriod, "how often to perform a full resync of pods between kubernetes and the provider") + +} + +func getEnv(key, defaultValue string) string { + value, found := os.LookupEnv(key) + if found { + return value + } + return defaultValue +} diff --git a/cmd/http.go b/cmd/virtual-kubelet/commands/root/http.go similarity index 76% rename from cmd/http.go rename to cmd/virtual-kubelet/commands/root/http.go index 6c911da65..c71637366 100644 --- a/cmd/http.go +++ b/cmd/virtual-kubelet/commands/root/http.go @@ -1,4 +1,18 @@ -package cmd +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
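mapVar implements the pflag.Value interface, which is what lets --trace-tag be passed repeatedly in key=value form via installFlags above. A minimal sketch of the resulting behavior, as it might appear in a test inside this package (assuming the github.com/spf13/pflag import):

    fs := pflag.NewFlagSet("virtual-kubelet", pflag.ContinueOnError)
    tags := mapVar(map[string]string{})
    fs.Var(tags, "trace-tag", "add tags to include with traces in key=value form")

    // Each occurrence adds one entry; a duplicate key or a value without "=" returns an error.
    err := fs.Parse([]string{"--trace-tag", "team=infra", "--trace-tag", "env=prod"})
    // err == nil and tags now holds {"team": "infra", "env": "prod"}.
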
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root import ( "context" @@ -8,11 +22,10 @@ import ( "net" "net/http" "os" - "strconv" - "github.com/cpuguy83/strongerrors" "github.com/pkg/errors" "github.com/virtual-kubelet/virtual-kubelet/log" + "github.com/virtual-kubelet/virtual-kubelet/providers" "github.com/virtual-kubelet/virtual-kubelet/vkubelet" ) @@ -44,7 +57,7 @@ func loadTLSConfig(certPath, keyPath string) (*tls.Config, error) { }, nil } -func setupHTTPServer(ctx context.Context, cfg *apiServerConfig) (cancel func(), retErr error) { +func setupHTTPServer(ctx context.Context, p providers.Provider, cfg *apiServerConfig) (cancel func(), retErr error) { var closers []io.Closer cancel = func() { for _, c := range closers { @@ -121,18 +134,14 @@ type apiServerConfig struct { MetricsAddr string } -func getAPIConfig(metricsAddr string) (*apiServerConfig, error) { +func getAPIConfig(c Opts) (*apiServerConfig, error) { config := apiServerConfig{ CertPath: os.Getenv("APISERVER_CERT_LOCATION"), KeyPath: os.Getenv("APISERVER_KEY_LOCATION"), } - port, err := strconv.Atoi(os.Getenv("KUBELET_PORT")) - if err != nil { - return nil, strongerrors.InvalidArgument(errors.Wrap(err, "error parsing KUBELET_PORT variable")) - } - config.Addr = fmt.Sprintf(":%d", port) - config.MetricsAddr = metricsAddr + config.Addr = fmt.Sprintf(":%d", c.ListenPort) + config.MetricsAddr = c.MetricsAddr return &config, nil } diff --git a/cmd/node.go b/cmd/virtual-kubelet/commands/root/node.go similarity index 51% rename from cmd/node.go rename to cmd/virtual-kubelet/commands/root/node.go index 67611bd77..c949abd64 100644 --- a/cmd/node.go +++ b/cmd/virtual-kubelet/commands/root/node.go @@ -1,11 +1,28 @@ -package cmd +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root import ( "context" "strings" + "github.com/cpuguy83/strongerrors" + "github.com/pkg/errors" "github.com/virtual-kubelet/virtual-kubelet/providers" "github.com/virtual-kubelet/virtual-kubelet/version" + corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -54,3 +71,41 @@ func NodeFromProvider(ctx context.Context, name string, taint *v1.Taint, p provi } return node } + +// getTaint creates a taint using the provided key/value. +// Taint effect is read from the environment +// The taint key/value may be overwritten by the environment. 
+func getTaint(c Opts) (*corev1.Taint, error) { + value := c.Provider + + key := c.TaintKey + if key == "" { + key = DefaultTaintKey + } + + if c.TaintEffect == "" { + c.TaintEffect = DefaultTaintEffect + } + + key = getEnv("VKUBELET_TAINT_KEY", key) + value = getEnv("VKUBELET_TAINT_VALUE", value) + effectEnv := getEnv("VKUBELET_TAINT_EFFECT", string(c.TaintEffect)) + + var effect corev1.TaintEffect + switch effectEnv { + case "NoSchedule": + effect = corev1.TaintEffectNoSchedule + case "NoExecute": + effect = corev1.TaintEffectNoExecute + case "PreferNoSchedule": + effect = corev1.TaintEffectPreferNoSchedule + default: + return nil, strongerrors.InvalidArgument(errors.Errorf("taint effect %q is not supported", effectEnv)) + } + + return &corev1.Taint{ + Key: key, + Value: value, + Effect: effect, + }, nil +} diff --git a/cmd/virtual-kubelet/commands/root/opts.go b/cmd/virtual-kubelet/commands/root/opts.go new file mode 100644 index 000000000..41038faef --- /dev/null +++ b/cmd/virtual-kubelet/commands/root/opts.go @@ -0,0 +1,143 @@ +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package root + +import ( + "os" + "path/filepath" + "strconv" + "time" + + "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + "github.com/virtual-kubelet/virtual-kubelet/trace/opencensus" + corev1 "k8s.io/api/core/v1" +) + +// Defaults for root command options +const ( + DefaultNodeName = "virtual-kubelet" + DefaultOperatingSystem = "Linux" + DefaultInformerResyncPeriod = 1 * time.Minute + DefaultMetricsAddr = ":10255" + DefaultListenPort = 10250 // TODO(cpuguy83)(VK1.0): Change this to an addr instead of just a port.. we should not be listening on all interfaces. + DefaultPodSyncWorkers = 10 + DefaultKubeNamespace = corev1.NamespaceAll + + DefaultTaintEffect = string(corev1.TaintEffectNoSchedule) + DefaultTaintKey = "virtual-kubelet.io/provider" +) + +// Opts stores all the options for configuring the root virtual-kubelet command. +// It is used for setting flag values. +// +// You can set the default options by creating a new `Opts` struct and passing +// it into `SetDefaultOpts` +type Opts struct { + // Path to the kubeconfig to use to connect to the Kubernetes API server. 
+ KubeConfigPath string + // Namespace to watch for pods and other resources + KubeNamespace string + // Sets the port to listen for requests from the Kubernetes API server + ListenPort int32 + + // Node name to use when creating a node in Kubernetes + NodeName string + + // Operating system to run pods for + OperatingSystem string + + Provider string + ProviderConfigPath string + + TaintKey string + TaintEffect string + DisableTaint bool + + MetricsAddr string + + // Number of workers to use to handle pod notifications + PodSyncWorkers int + InformerResyncPeriod time.Duration + + // Use node leases when supported by Kubernetes (instead of node status updates) + EnableNodeLease bool + + TraceExporters []string + TraceSampleRate string + TraceConfig opencensus.TracingExporterOptions +} + +// SetDefaultOpts sets default options for unset values on the passed in option struct. +// Fields tht are already set will not be modified. +func SetDefaultOpts(c *Opts) error { + if c.OperatingSystem == "" { + c.OperatingSystem = DefaultOperatingSystem + } + + if c.NodeName == "" { + c.NodeName = getEnv("DEFAULT_NODE_NAME", DefaultNodeName) + } + + if c.InformerResyncPeriod == 0 { + c.InformerResyncPeriod = DefaultInformerResyncPeriod + } + + if c.MetricsAddr == "" { + c.MetricsAddr = DefaultMetricsAddr + } + + if c.PodSyncWorkers == 0 { + c.PodSyncWorkers = DefaultPodSyncWorkers + } + + if c.TraceConfig.ServiceName == "" { + c.TraceConfig.ServiceName = DefaultNodeName + } + + if c.ListenPort == 0 { + if kp := os.Getenv("KUBELET_PORT"); kp != "" { + p, err := strconv.Atoi(kp) + if err != nil { + return errors.Wrap(err, "error parsing KUBELET_PORT environment variable") + } + c.ListenPort = int32(p) + } + c.ListenPort = DefaultListenPort + } + + if c.KubeNamespace == "" { + c.KubeNamespace = DefaultKubeNamespace + } + + if c.TaintKey == "" { + c.TaintKey = DefaultTaintKey + } + if c.TaintEffect == "" { + c.TaintEffect = DefaultTaintEffect + } + + if c.KubeConfigPath == "" { + c.KubeConfigPath = os.Getenv("KUBECONFIG") + if c.KubeConfigPath == "" { + home, _ := homedir.Dir() + if home != "" { + c.KubeConfigPath = filepath.Join(home, ".kube", "config") + } + } + } + + return nil +} diff --git a/cmd/virtual-kubelet/commands/root/root.go b/cmd/virtual-kubelet/commands/root/root.go new file mode 100644 index 000000000..0e207814d --- /dev/null +++ b/cmd/virtual-kubelet/commands/root/root.go @@ -0,0 +1,204 @@ +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
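SetDefaultOpts only touches fields that are still zero-valued, so a caller can pre-populate an Opts struct and let everything else default. A minimal sketch; the provider and node names are illustrative, and the defaults shown come from the constants above:

    package main

    import (
        "fmt"
        "log"

        "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/root"
    )

    func main() {
        opts := root.Opts{
            Provider: "example-provider", // illustrative; must name a registered provider
            NodeName: "my-virtual-node",
        }
        if err := root.SetDefaultOpts(&opts); err != nil {
            log.Fatal(err) // e.g. KUBELET_PORT set to a non-numeric value
        }
        // Unset fields now carry the defaults defined above: OperatingSystem "Linux",
        // ListenPort 10250, PodSyncWorkers 10, MetricsAddr ":10255", a 1m resync
        // period, and a kubeconfig path from $KUBECONFIG or $HOME/.kube/config.
        fmt.Println(opts.OperatingSystem, opts.ListenPort, opts.PodSyncWorkers)
    }
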
+ +package root + +import ( + "context" + "os" + + "github.com/cpuguy83/strongerrors" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/virtual-kubelet/virtual-kubelet/log" + "github.com/virtual-kubelet/virtual-kubelet/manager" + "github.com/virtual-kubelet/virtual-kubelet/providers" + "github.com/virtual-kubelet/virtual-kubelet/providers/register" + "github.com/virtual-kubelet/virtual-kubelet/vkubelet" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// NewCommand creates a new top-level command. +// This command is used to start the virtual-kubelet daemon +func NewCommand(ctx context.Context, name string, c Opts) *cobra.Command { + cmd := &cobra.Command{ + Use: name, + Short: name + " provides a virtual kubelet interface for your kubernetes cluster.", + Long: name + ` implements the Kubelet interface with a pluggable +backend implementation allowing users to create kubernetes nodes without running the kubelet. +This allows users to schedule kubernetes workloads on nodes that aren't running Kubernetes.`, + RunE: func(cmd *cobra.Command, args []string) error { + return runRootCommand(ctx, c) + }, + } + + installFlags(cmd.Flags(), &c) + return cmd +} + +func runRootCommand(ctx context.Context, c Opts) error { + if ok := providers.ValidOperatingSystems[c.OperatingSystem]; !ok { + return strongerrors.InvalidArgument(errors.Errorf("operating system %q is not supported", c.OperatingSystem)) + } + + if c.PodSyncWorkers == 0 { + return strongerrors.InvalidArgument(errors.New("pod sync workers must be greater than 0")) + } + + var taint *corev1.Taint + if !c.DisableTaint { + var err error + taint, err = getTaint(c) + if err != nil { + return err + } + } + + client, err := newClient(c.KubeConfigPath) + if err != nil { + return err + } + + // Create a shared informer factory for Kubernetes pods in the current namespace (if specified) and scheduled to the current node. + podInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions( + client, + c.InformerResyncPeriod, + kubeinformers.WithNamespace(c.KubeNamespace), + kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) { + options.FieldSelector = fields.OneTermEqualSelector("spec.nodeName", c.NodeName).String() + })) + // Create a pod informer so we can pass its lister to the resource manager. + podInformer := podInformerFactory.Core().V1().Pods() + + // Create another shared informer factory for Kubernetes secrets and configmaps (not subject to any selectors). + scmInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(client, c.InformerResyncPeriod) + // Create a secret informer and a config map informer so we can pass their listers to the resource manager. 
+ secretInformer := scmInformerFactory.Core().V1().Secrets() + configMapInformer := scmInformerFactory.Core().V1().ConfigMaps() + serviceInformer := scmInformerFactory.Core().V1().Services() + + go podInformerFactory.Start(ctx.Done()) + go scmInformerFactory.Start(ctx.Done()) + + rm, err := manager.NewResourceManager(podInformer.Lister(), secretInformer.Lister(), configMapInformer.Lister(), serviceInformer.Lister()) + if err != nil { + return errors.Wrap(err, "could not create resource manager") + } + + apiConfig, err := getAPIConfig(c) + if err != nil { + return err + } + + if err := setupTracing(ctx, c); err != nil { + return err + } + + initConfig := register.InitConfig{ + ConfigPath: c.ProviderConfigPath, + NodeName: c.NodeName, + OperatingSystem: c.OperatingSystem, + ResourceManager: rm, + DaemonPort: int32(c.ListenPort), + InternalIP: os.Getenv("VKUBELET_POD_IP"), + } + + p, err := register.GetProvider(c.Provider, initConfig) + if err != nil { + return err + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ + "provider": c.Provider, + "operatingSystem": c.OperatingSystem, + "node": c.NodeName, + "watchedNamespace": c.KubeNamespace, + })) + + pNode := NodeFromProvider(ctx, c.NodeName, taint, p) + node, err := vkubelet.NewNode( + vkubelet.NaiveNodeProvider{}, + pNode, + client.Coordination().Leases(corev1.NamespaceNodeLease), + client.CoreV1().Nodes(), + vkubelet.WithNodeDisableLease(!c.EnableNodeLease), + ) + if err != nil { + log.G(ctx).Fatal(err) + } + + vk := vkubelet.New(vkubelet.Config{ + Client: client, + Namespace: c.KubeNamespace, + NodeName: pNode.Name, + Provider: p, + ResourceManager: rm, + PodSyncWorkers: c.PodSyncWorkers, + PodInformer: podInformer, + }) + + cancelHTTP, err := setupHTTPServer(ctx, p, apiConfig) + if err != nil { + return err + } + defer cancelHTTP() + + go func() { + if err := vk.Run(ctx); err != nil && errors.Cause(err) != context.Canceled { + log.G(ctx).Fatal(err) + } + }() + + go func() { + if err := node.Run(ctx); err != nil { + log.G(ctx).Fatal(err) + } + }() + + log.G(ctx).Info("Initialized") + + <-ctx.Done() + return nil +} + +func newClient(configPath string) (*kubernetes.Clientset, error) { + var config *rest.Config + + // Check if the kubeConfig file exists. + if _, err := os.Stat(configPath); !os.IsNotExist(err) { + // Get the kubeconfig from the filepath. + config, err = clientcmd.BuildConfigFromFlags("", configPath) + if err != nil { + return nil, errors.Wrap(err, "error building client config") + } + } else { + // Set to in-cluster config. + config, err = rest.InClusterConfig() + if err != nil { + return nil, errors.Wrap(err, "error building in cluster config") + } + } + + if masterURI := os.Getenv("MASTER_URI"); masterURI != "" { + config.Host = masterURI + } + + return kubernetes.NewForConfig(config) +} diff --git a/cmd/virtual-kubelet/commands/root/tracing.go b/cmd/virtual-kubelet/commands/root/tracing.go new file mode 100644 index 000000000..bad60bca8 --- /dev/null +++ b/cmd/virtual-kubelet/commands/root/tracing.go @@ -0,0 +1,102 @@ +// Copyright © 2017 The virtual-kubelet authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
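With the root command exposed as a constructor that takes a context and an Opts value, a downstream provider binary can reuse this CLI rather than re-implementing it, which is the re-use the commit message refers to. A hedged sketch of such a main (provider name and layout are illustrative, not part of this patch):

    package main

    import (
        "context"
        "os"
        "os/signal"
        "path/filepath"
        "syscall"

        "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/root"
        "github.com/virtual-kubelet/virtual-kubelet/log"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        // Cancel the root context on SIGINT/SIGTERM, mirroring cmd/virtual-kubelet/main.go below.
        sig := make(chan os.Signal, 1)
        signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
        go func() {
            <-sig
            cancel()
        }()

        opts := root.Opts{Provider: "example-provider"} // illustrative provider name
        if err := root.SetDefaultOpts(&opts); err != nil {
            log.G(ctx).Fatal(err)
        }

        if err := root.NewCommand(ctx, filepath.Base(os.Args[0]), opts).Execute(); err != nil {
            log.G(ctx).Fatal(err)
        }
    }
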
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package root
+
+import (
+	"context"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/cpuguy83/strongerrors"
+	"github.com/pkg/errors"
+	"github.com/virtual-kubelet/virtual-kubelet/log"
+	"github.com/virtual-kubelet/virtual-kubelet/trace/opencensus"
+	octrace "go.opencensus.io/trace"
+	"go.opencensus.io/zpages"
+)
+
+var (
+	reservedTagNames = map[string]bool{
+		"operatingSystem": true,
+		"provider":        true,
+		"nodeName":        true,
+	}
+)
+
+func setupTracing(ctx context.Context, c Opts) error {
+	for k := range c.TraceConfig.Tags {
+		if reservedTagNames[k] {
+			return strongerrors.InvalidArgument(errors.Errorf("invalid trace tag %q, must not use a reserved tag key", k))
+		}
+	}
+	if c.TraceConfig.Tags == nil {
+		c.TraceConfig.Tags = make(map[string]string, 3)
+	}
+	c.TraceConfig.Tags["operatingSystem"] = c.OperatingSystem
+	c.TraceConfig.Tags["provider"] = c.Provider
+	c.TraceConfig.Tags["nodeName"] = c.NodeName
+	for _, e := range c.TraceExporters {
+		if e == "zpages" {
+			go setupZpages(ctx)
+			continue
+		}
+		exporter, err := opencensus.GetTracingExporter(e, c.TraceConfig)
+		if err != nil {
+			return err
+		}
+		octrace.RegisterExporter(exporter)
+	}
+	if len(c.TraceExporters) > 0 {
+		var s octrace.Sampler
+		switch strings.ToLower(c.TraceSampleRate) {
+		case "":
+		case "always":
+			s = octrace.AlwaysSample()
+		case "never":
+			s = octrace.NeverSample()
+		default:
+			rate, err := strconv.Atoi(c.TraceSampleRate)
+			if err != nil {
+				return strongerrors.InvalidArgument(errors.Wrap(err, "unsupported trace sample rate"))
+			}
+			if rate < 0 || rate > 100 {
+				return strongerrors.InvalidArgument(errors.New("trace sample rate must be between 0 and 100"))
+			}
+			s = octrace.ProbabilitySampler(float64(rate) / 100)
+		}
+
+		if s != nil {
+			octrace.ApplyConfig(
+				octrace.Config{
+					DefaultSampler: s,
+				},
+			)
+		}
+	}
+
+	return nil
+}
+
+func setupZpages(ctx context.Context) {
+	p := os.Getenv("ZPAGES_PORT")
+	if p == "" {
+		log.G(ctx).Error("Missing ZPAGES_PORT env var, cannot set up zpages endpoint")
+	}
+	mux := http.NewServeMux()
+	zpages.Handle(mux, "/debug")
+	http.ListenAndServe(p, mux)
+}
diff --git a/main.go b/cmd/virtual-kubelet/commands/version/version.go
similarity index 56%
rename from main.go
rename to cmd/virtual-kubelet/commands/version/version.go
index c38c743cf..73f3e3967 100644
--- a/main.go
+++ b/cmd/virtual-kubelet/commands/version/version.go
@@ -12,10 +12,23 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-package main +package version -import "github.com/virtual-kubelet/virtual-kubelet/cmd" +import ( + "fmt" -func main() { - cmd.Execute() + "github.com/spf13/cobra" + "github.com/virtual-kubelet/virtual-kubelet/version" +) + +// NewCommand creates a new version subcommand command +func NewCommand() *cobra.Command { + return &cobra.Command{ + Use: "version", + Short: "Show the version of the program", + Long: `Show the version of the program`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("Version: %s, Built: %s\n", version.Version, version.BuildTime) + }, + } } diff --git a/cmd/virtual-kubelet/main.go b/cmd/virtual-kubelet/main.go new file mode 100644 index 000000000..cdac76441 --- /dev/null +++ b/cmd/virtual-kubelet/main.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "os" + "os/signal" + "path/filepath" + "syscall" + + "github.com/Sirupsen/logrus" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/providers" + "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/root" + "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/version" + "github.com/virtual-kubelet/virtual-kubelet/log" + logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus" + "github.com/virtual-kubelet/virtual-kubelet/trace" + "github.com/virtual-kubelet/virtual-kubelet/trace/opencensus" +) + +func main() { + ctx, cancel := context.WithCancel(context.Background()) + sig := make(chan os.Signal, 1) + signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-sig + cancel() + }() + + log.L = logruslogger.FromLogrus(logrus.NewEntry(logrus.StandardLogger())) + trace.T = opencensus.Adapter{} + + var opts root.Opts + optsErr := root.SetDefaultOpts(&opts) + + rootCmd := root.NewCommand(ctx, filepath.Base(os.Args[0]), opts) + rootCmd.AddCommand(version.NewCommand(), providers.NewCommand()) + preRun := rootCmd.PreRunE + + var logLevel string + rootCmd.PreRunE = func(cmd *cobra.Command, args []string) error { + if optsErr != nil { + return optsErr + } + if preRun != nil { + return preRun(cmd, args) + } + return nil + } + + rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", `set the log level, e.g. 
"debug", "info", "warn", "error"`) + + rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { + if logLevel != "" { + lvl, err := logrus.ParseLevel(logLevel) + if err != nil { + return errors.Wrap(err, "could not parse log level") + } + logrus.SetLevel(lvl) + } + return nil + } + + if err := rootCmd.Execute(); err != nil && errors.Cause(err) != context.Canceled { + log.G(ctx).Fatal(err) + } +} diff --git a/cmd/census_jaeger.go b/trace/opencensus/jaeger.go similarity index 97% rename from cmd/census_jaeger.go rename to trace/opencensus/jaeger.go index d4d388206..c0461bea0 100644 --- a/cmd/census_jaeger.go +++ b/trace/opencensus/jaeger.go @@ -1,6 +1,6 @@ // +build !no_jaeger_exporter -package cmd +package opencensus import ( "errors" diff --git a/cmd/census_ocagent.go b/trace/opencensus/ocagent.go similarity index 98% rename from cmd/census_ocagent.go rename to trace/opencensus/ocagent.go index 6ad155890..8c813eb50 100644 --- a/cmd/census_ocagent.go +++ b/trace/opencensus/ocagent.go @@ -1,6 +1,6 @@ // +build !no_ocagent_exporter -package cmd +package opencensus import ( "os" diff --git a/cmd/census_tracing.go b/trace/opencensus/register.go similarity index 71% rename from cmd/census_tracing.go rename to trace/opencensus/register.go index 8ffdd5854..2a74a95a4 100644 --- a/cmd/census_tracing.go +++ b/trace/opencensus/register.go @@ -1,33 +1,20 @@ -package cmd +package opencensus import ( - "context" - "net/http" - "os" - "github.com/cpuguy83/strongerrors" "github.com/pkg/errors" - "github.com/virtual-kubelet/virtual-kubelet/log" "go.opencensus.io/trace" - "go.opencensus.io/zpages" ) -var ( - tracingExporters = make(map[string]TracingExporterInitFunc) - - reservedTagNames = map[string]bool{ - "operatingSystem": true, - "provider": true, - "nodeName": true, - } -) - -// TracingExporterOptions is used to pass options to the configured tracer type TracingExporterOptions struct { Tags map[string]string ServiceName string } +var ( + tracingExporters = make(map[string]TracingExporterInitFunc) +) + // TracingExporterInitFunc is the function that is called to initialize an exporter. // This is used when registering an exporter and called when a user specifed they want to use the exporter. type TracingExporterInitFunc func(TracingExporterOptions) (trace.Exporter, error) @@ -56,14 +43,3 @@ func AvailableTraceExporters() []string { } return out } - -func setupZpages() { - ctx := context.TODO() - p := os.Getenv("ZPAGES_PORT") - if p == "" { - log.G(ctx).Error("Missing ZPAGES_PORT env var, cannot setup zpages endpoint") - } - mux := http.NewServeMux() - zpages.Handle(mux, "/debug") - http.ListenAndServe(p, mux) -} diff --git a/cmd/census_test.go b/trace/opencensus/register_test.go similarity index 98% rename from cmd/census_test.go rename to trace/opencensus/register_test.go index d06c16d63..140f0ba77 100644 --- a/cmd/census_test.go +++ b/trace/opencensus/register_test.go @@ -1,4 +1,4 @@ -package cmd +package opencensus import ( "testing" diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41c5..000000000 --- a/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. 
- -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index f21e54080..000000000 --- a/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb88..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 190bf0de5..000000000 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index d9fd1b88a..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index cc7db4b22..000000000 --- a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(0) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3d6..000000000 --- a/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 7d8de1451..000000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 9139e1716..000000000 --- a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f31d..000000000 --- a/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go deleted file mode 100644 index 7c038d12a..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go +++ /dev/null @@ -1,789 +0,0 @@ -package printer - -import ( - "bytes" - "fmt" - "sort" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/token" -) - -const ( - blank = byte(' ') - newline = byte('\n') - tab = byte('\t') - infinity = 1 << 30 // offset or line -) - -var ( - unindent = []byte("\uE123") // in the private use space -) - -type printer struct { - cfg Config - prev token.Pos - - comments []*ast.CommentGroup // may be nil, contains all comments - standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node) - - enableTrace bool - indentTrace int -} - -type ByPosition []*ast.CommentGroup - -func (b ByPosition) Len() int { return len(b) } -func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) } - -// collectComments comments all standalone comments which are not lead or line -// comment -func (p *printer) collectComments(node ast.Node) { - // first collect all comments. This is already stored in - // ast.File.(comments) - ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.File: - p.comments = t.Comments - return nn, false - } - return nn, true - }) - - standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0) - for _, c := range p.comments { - standaloneComments[c.Pos()] = c - } - - // next remove all lead and line comments from the overall comment map. - // This will give us comments which are standalone, comments which are not - // assigned to any kind of node. 
- ast.Walk(node, func(nn ast.Node) (ast.Node, bool) { - switch t := nn.(type) { - case *ast.LiteralType: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - case *ast.ObjectItem: - if t.LeadComment != nil { - for _, comment := range t.LeadComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - - if t.LineComment != nil { - for _, comment := range t.LineComment.List { - if _, ok := standaloneComments[comment.Pos()]; ok { - delete(standaloneComments, comment.Pos()) - } - } - } - } - - return nn, true - }) - - for _, c := range standaloneComments { - p.standaloneComments = append(p.standaloneComments, c) - } - - sort.Sort(ByPosition(p.standaloneComments)) -} - -// output prints creates b printable HCL output and returns it. -func (p *printer) output(n interface{}) []byte { - var buf bytes.Buffer - - switch t := n.(type) { - case *ast.File: - // File doesn't trace so we add the tracing here - defer un(trace(p, "File")) - return p.output(t.Node) - case *ast.ObjectList: - defer un(trace(p, "ObjectList")) - - var index int - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is at "infinity" - var nextItem token.Pos - if index != len(t.Items) { - nextItem = t.Items[index].Pos() - } else { - nextItem = token.Pos{Offset: infinity, Line: infinity} - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - // Go through all the comments in the group. The group - // should be printed together, not separated by double newlines. - printed := false - newlinePrinted := false - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). - // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // if we hit the end add newlines so we can print the comment - // we don't do this if prev is invalid which means the - // beginning of the file since the first comment should - // be at the first line. - if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) { - buf.Write([]byte{newline, newline}) - newlinePrinted = true - } - - // Write the actual comment. - buf.WriteString(comment.Text) - buf.WriteByte(newline) - - // Set printed to true to note that we printed something - printed = true - } - } - - // If we're not at the last item, write a new line so - // that there is a newline separating this comment from - // the next object. - if printed && index != len(t.Items) { - buf.WriteByte(newline) - } - } - - if index == len(t.Items) { - break - } - - buf.Write(p.output(t.Items[index])) - if index != len(t.Items)-1 { - // Always write a newline to separate us from the next item - buf.WriteByte(newline) - - // Need to determine if we're going to separate the next item - // with a blank line. The logic here is simple, though there - // are a few conditions: - // - // 1. The next object is more than one line away anyways, - // so we need an empty line. - // - // 2. 
The next object is not a "single line" object, so - // we need an empty line. - // - // 3. This current object is not a single line object, - // so we need an empty line. - current := t.Items[index] - next := t.Items[index+1] - if next.Pos().Line != t.Items[index].Pos().Line+1 || - !p.isSingleLineObject(next) || - !p.isSingleLineObject(current) { - buf.WriteByte(newline) - } - } - index++ - } - case *ast.ObjectKey: - buf.WriteString(t.Token.Text) - case *ast.ObjectItem: - p.prev = t.Pos() - buf.Write(p.objectItem(t)) - case *ast.LiteralType: - buf.Write(p.literalType(t)) - case *ast.ListType: - buf.Write(p.list(t)) - case *ast.ObjectType: - buf.Write(p.objectType(t)) - default: - fmt.Printf(" unknown type: %T\n", n) - } - - return buf.Bytes() -} - -func (p *printer) literalType(lit *ast.LiteralType) []byte { - result := []byte(lit.Token.Text) - switch lit.Token.Type { - case token.HEREDOC: - // Clear the trailing newline from heredocs - if result[len(result)-1] == '\n' { - result = result[:len(result)-1] - } - - // Poison lines 2+ so that we don't indent them - result = p.heredocIndent(result) - case token.STRING: - // If this is a multiline string, poison lines 2+ so we don't - // indent them. - if bytes.IndexRune(result, '\n') >= 0 { - result = p.heredocIndent(result) - } - } - - return result -} - -// objectItem returns the printable HCL form of an object item. An object type -// starts with one/multiple keys and has a value. The value might be of any -// type. -func (p *printer) objectItem(o *ast.ObjectItem) []byte { - defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text))) - var buf bytes.Buffer - - if o.LeadComment != nil { - for _, comment := range o.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - // If key and val are on different lines, treat line comments like lead comments. - if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line { - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range o.Keys { - buf.WriteString(k.Token.Text) - buf.WriteByte(blank) - - // reach end of key - if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - buf.Write(p.output(o.Val)) - - if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line { - buf.WriteByte(blank) - for _, comment := range o.LineComment.List { - buf.WriteString(comment.Text) - } - } - - return buf.Bytes() -} - -// objectType returns the printable HCL form of an object type. An object type -// begins with a brace and ends with a brace. -func (p *printer) objectType(o *ast.ObjectType) []byte { - defer un(trace(p, "ObjectType")) - var buf bytes.Buffer - buf.WriteString("{") - - var index int - var nextItem token.Pos - var commented, newlinePrinted bool - for { - // Determine the location of the next actual non-comment - // item. If we're at the end, the next item is the closing brace - if index != len(o.List.Items) { - nextItem = o.List.Items[index].Pos() - } else { - nextItem = o.Rbrace - } - - // Go through the standalone comments in the file and print out - // the comments that we should be for this object item. - for _, c := range p.standaloneComments { - printed := false - var lastCommentPos token.Pos - for _, comment := range c.List { - // We only care about comments after the previous item - // we've printed so that comments are printed in the - // correct locations (between two objects for example). 
- // And before the next item. - if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) { - // If there are standalone comments and the initial newline has not - // been printed yet, do it now. - if !newlinePrinted { - newlinePrinted = true - buf.WriteByte(newline) - } - - // add newline if it's between other printed nodes - if index > 0 { - commented = true - buf.WriteByte(newline) - } - - // Store this position - lastCommentPos = comment.Pos() - - // output the comment itself - buf.Write(p.indent(p.heredocIndent([]byte(comment.Text)))) - - // Set printed to true to note that we printed something - printed = true - - /* - if index != len(o.List.Items) { - buf.WriteByte(newline) // do not print on the end - } - */ - } - } - - // Stuff to do if we had comments - if printed { - // Always write a newline - buf.WriteByte(newline) - - // If there is another item in the object and our comment - // didn't hug it directly, then make sure there is a blank - // line separating them. - if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 { - buf.WriteByte(newline) - } - } - } - - if index == len(o.List.Items) { - p.prev = o.Rbrace - break - } - - // At this point we are sure that it's not a totally empty block: print - // the initial newline if it hasn't been printed yet by the previous - // block about standalone comments. - if !newlinePrinted { - buf.WriteByte(newline) - newlinePrinted = true - } - - // check if we have adjacent one liner items. If yes we'll going to align - // the comments. - var aligned []*ast.ObjectItem - for _, item := range o.List.Items[index:] { - // we don't group one line lists - if len(o.List.Items) == 1 { - break - } - - // one means a oneliner with out any lead comment - // two means a oneliner with lead comment - // anything else might be something else - cur := lines(string(p.objectItem(item))) - if cur > 2 { - break - } - - curPos := item.Pos() - - nextPos := token.Pos{} - if index != len(o.List.Items)-1 { - nextPos = o.List.Items[index+1].Pos() - } - - prevPos := token.Pos{} - if index != 0 { - prevPos = o.List.Items[index-1].Pos() - } - - // fmt.Println("DEBUG ----------------") - // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos) - // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos) - // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos) - - if curPos.Line+1 == nextPos.Line { - aligned = append(aligned, item) - index++ - continue - } - - if curPos.Line-1 == prevPos.Line { - aligned = append(aligned, item) - index++ - - // finish if we have a new line or comment next. This happens - // if the next item is not adjacent - if curPos.Line+1 != nextPos.Line { - break - } - continue - } - - break - } - - // put newlines if the items are between other non aligned items. 
- // newlines are also added if there is a standalone comment already, so - // check it too - if !commented && index != len(aligned) { - buf.WriteByte(newline) - } - - if len(aligned) >= 1 { - p.prev = aligned[len(aligned)-1].Pos() - - items := p.alignedItems(aligned) - buf.Write(p.indent(items)) - } else { - p.prev = o.List.Items[index].Pos() - - buf.Write(p.indent(p.objectItem(o.List.Items[index]))) - index++ - } - - buf.WriteByte(newline) - } - - buf.WriteString("}") - return buf.Bytes() -} - -func (p *printer) alignedItems(items []*ast.ObjectItem) []byte { - var buf bytes.Buffer - - // find the longest key and value length, needed for alignment - var longestKeyLen int // longest key length - var longestValLen int // longest value length - for _, item := range items { - key := len(item.Keys[0].Token.Text) - val := len(p.output(item.Val)) - - if key > longestKeyLen { - longestKeyLen = key - } - - if val > longestValLen { - longestValLen = val - } - } - - for i, item := range items { - if item.LeadComment != nil { - for _, comment := range item.LeadComment.List { - buf.WriteString(comment.Text) - buf.WriteByte(newline) - } - } - - for i, k := range item.Keys { - keyLen := len(k.Token.Text) - buf.WriteString(k.Token.Text) - for i := 0; i < longestKeyLen-keyLen+1; i++ { - buf.WriteByte(blank) - } - - // reach end of key - if i == len(item.Keys)-1 && len(item.Keys) == 1 { - buf.WriteString("=") - buf.WriteByte(blank) - } - } - - val := p.output(item.Val) - valLen := len(val) - buf.Write(val) - - if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil { - for i := 0; i < longestValLen-valLen+1; i++ { - buf.WriteByte(blank) - } - - for _, comment := range item.LineComment.List { - buf.WriteString(comment.Text) - } - } - - // do not print for the last item - if i != len(items)-1 { - buf.WriteByte(newline) - } - } - - return buf.Bytes() -} - -// list returns the printable HCL form of an list type. -func (p *printer) list(l *ast.ListType) []byte { - if p.isSingleLineList(l) { - return p.singleLineList(l) - } - - var buf bytes.Buffer - buf.WriteString("[") - buf.WriteByte(newline) - - var longestLine int - for _, item := range l.List { - // for now we assume that the list only contains literal types - if lit, ok := item.(*ast.LiteralType); ok { - lineLen := len(lit.Token.Text) - if lineLen > longestLine { - longestLine = lineLen - } - } - } - - haveEmptyLine := false - for i, item := range l.List { - // If we have a lead comment, then we want to write that first - leadComment := false - if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil { - leadComment = true - - // Ensure an empty line before every element with a - // lead comment (except the first item in a list). - if !haveEmptyLine && i != 0 { - buf.WriteByte(newline) - } - - for _, comment := range lit.LeadComment.List { - buf.Write(p.indent([]byte(comment.Text))) - buf.WriteByte(newline) - } - } - - // also indent each line - val := p.output(item) - curLen := len(val) - buf.Write(p.indent(val)) - - // if this item is a heredoc, then we output the comma on - // the next line. This is the only case this happens. 
- comma := []byte{','} - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - comma = p.indent(comma) - } - - buf.Write(comma) - - if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { - // if the next item doesn't have any comments, do not align - buf.WriteByte(blank) // align one space - for i := 0; i < longestLine-curLen; i++ { - buf.WriteByte(blank) - } - - for _, comment := range lit.LineComment.List { - buf.WriteString(comment.Text) - } - } - - buf.WriteByte(newline) - - // Ensure an empty line after every element with a - // lead comment (except the first item in a list). - haveEmptyLine = leadComment && i != len(l.List)-1 - if haveEmptyLine { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// isSingleLineList returns true if: -// * they were previously formatted entirely on one line -// * they consist entirely of literals -// * there are either no heredoc strings or the list has exactly one element -// * there are no line comments -func (printer) isSingleLineList(l *ast.ListType) bool { - for _, item := range l.List { - if item.Pos().Line != l.Lbrack.Line { - return false - } - - lit, ok := item.(*ast.LiteralType) - if !ok { - return false - } - - if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { - return false - } - - if lit.LineComment != nil { - return false - } - } - - return true -} - -// singleLineList prints a simple single line list. -// For a definition of "simple", see isSingleLineList above. -func (p *printer) singleLineList(l *ast.ListType) []byte { - buf := &bytes.Buffer{} - - buf.WriteString("[") - for i, item := range l.List { - if i != 0 { - buf.WriteString(", ") - } - - // Output the item itself - buf.Write(p.output(item)) - - // The heredoc marker needs to be at the end of line. - if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { - buf.WriteByte(newline) - } - } - - buf.WriteString("]") - return buf.Bytes() -} - -// indent indents the lines of the given buffer for each non-empty line -func (p *printer) indent(buf []byte) []byte { - var prefix []byte - if p.cfg.SpacesWidth != 0 { - for i := 0; i < p.cfg.SpacesWidth; i++ { - prefix = append(prefix, blank) - } - } else { - prefix = []byte{tab} - } - - var res []byte - bol := true - for _, c := range buf { - if bol && c != '\n' { - res = append(res, prefix...) - } - - res = append(res, c) - bol = c == '\n' - } - return res -} - -// unindent removes all the indentation from the tombstoned lines -func (p *printer) unindent(buf []byte) []byte { - var res []byte - for i := 0; i < len(buf); i++ { - skip := len(buf)-i <= len(unindent) - if !skip { - skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) - } - if skip { - res = append(res, buf[i]) - continue - } - - // We have a marker. we have to backtrace here and clean out - // any whitespace ahead of our tombstone up to a \n - for j := len(res) - 1; j >= 0; j-- { - if res[j] == '\n' { - break - } - - res = res[:j] - } - - // Skip the entire unindent marker - i += len(unindent) - 1 - } - - return res -} - -// heredocIndent marks all the 2nd and further lines as unindentable -func (p *printer) heredocIndent(buf []byte) []byte { - var res []byte - bol := false - for _, c := range buf { - if bol && c != '\n' { - res = append(res, unindent...) - } - res = append(res, c) - bol = c == '\n' - } - return res -} - -// isSingleLineObject tells whether the given object item is a single -// line object such as "obj {}". 
-// -// A single line object: -// -// * has no lead comments (hence multi-line) -// * has no assignment -// * has no values in the stanza (within {}) -// -func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { - // If there is a lead comment, can't be one line - if val.LeadComment != nil { - return false - } - - // If there is assignment, we always break by line - if val.Assign.IsValid() { - return false - } - - // If it isn't an object type, then its not a single line object - ot, ok := val.Val.(*ast.ObjectType) - if !ok { - return false - } - - // If the object has no items, it is single line! - return len(ot.List.Items) == 0 -} - -func lines(txt string) int { - endline := 1 - for i := 0; i < len(txt); i++ { - if txt[i] == '\n' { - endline++ - } - } - return endline -} - -// ---------------------------------------------------------------------------- -// Tracing support - -func (p *printer) printTrace(a ...interface{}) { - if !p.enableTrace { - return - } - - const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " - const n = len(dots) - i := 2 * p.indentTrace - for i > n { - fmt.Print(dots) - i -= n - } - // i <= n - fmt.Print(dots[0:i]) - fmt.Println(a...) -} - -func trace(p *printer, msg string) *printer { - p.printTrace(msg, "(") - p.indentTrace++ - return p -} - -// Usage pattern: defer un(trace(p, "...")) -func un(p *printer) { - p.indentTrace-- - p.printTrace(")") -} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go deleted file mode 100644 index 6617ab8e7..000000000 --- a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go +++ /dev/null @@ -1,66 +0,0 @@ -// Package printer implements printing of AST nodes to HCL format. -package printer - -import ( - "bytes" - "io" - "text/tabwriter" - - "github.com/hashicorp/hcl/hcl/ast" - "github.com/hashicorp/hcl/hcl/parser" -) - -var DefaultConfig = Config{ - SpacesWidth: 2, -} - -// A Config node controls the output of Fprint. -type Config struct { - SpacesWidth int // if set, it will use spaces instead of tabs for alignment -} - -func (c *Config) Fprint(output io.Writer, node ast.Node) error { - p := &printer{ - cfg: *c, - comments: make([]*ast.CommentGroup, 0), - standaloneComments: make([]*ast.CommentGroup, 0), - // enableTrace: true, - } - - p.collectComments(node) - - if _, err := output.Write(p.unindent(p.output(node))); err != nil { - return err - } - - // flush tabwriter, if any - var err error - if tw, _ := output.(*tabwriter.Writer); tw != nil { - err = tw.Flush() - } - - return err -} - -// Fprint "pretty-prints" an HCL node to output -// It calls Config.Fprint with default settings. -func Fprint(output io.Writer, node ast.Node) error { - return DefaultConfig.Fprint(output, node) -} - -// Format formats src HCL and returns the result. -func Format(src []byte) ([]byte, error) { - node, err := parser.Parse(src) - if err != nil { - return nil, err - } - - var buf bytes.Buffer - if err := DefaultConfig.Fprint(&buf, node); err != nil { - return nil, err - } - - // Add trailing newline to result - buf.WriteString("\n") - return buf.Bytes(), nil -} diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE deleted file mode 100644 index b387087c5..000000000 --- a/vendor/github.com/magiconair/properties/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -goproperties - properties file decoder for Go - -Copyright (c) 2013-2018 - Frank Schroeder - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go deleted file mode 100644 index 0a961bb04..000000000 --- a/vendor/github.com/magiconair/properties/decode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" -) - -// Decode assigns property values to exported fields of a struct. -// -// Decode traverses v recursively and returns an error if a value cannot be -// converted to the field type or a required value is missing for a field. -// -// The following type dependent decodings are used: -// -// String, boolean, numeric fields have the value of the property key assigned. -// The property key name is the name of the field. A different key and a default -// value can be set in the field's tag. Fields without default value are -// required. If the value cannot be converted to the field type an error is -// returned. -// -// time.Duration fields have the result of time.ParseDuration() assigned. -// -// time.Time fields have the vaule of time.Parse() assigned. The default layout -// is time.RFC3339 but can be set in the field's tag. -// -// Arrays and slices of string, boolean, numeric, time.Duration and time.Time -// fields have the value interpreted as a comma separated list of values. The -// individual values are trimmed of whitespace and empty values are ignored. A -// default value can be provided as a semicolon separated list in the field's -// tag. -// -// Struct fields are decoded recursively using the field name plus "." as -// prefix. The prefix (without dot) can be overridden in the field's tag. -// Default values are not supported in the field's tag. Specify them on the -// fields of the inner struct instead. -// -// Map fields must have a key of type string and are decoded recursively by -// using the field's name plus ".' as prefix and the next element of the key -// name as map key. The prefix (without dot) can be overridden in the field's -// tag. Default values are not supported. -// -// Examples: -// -// // Field is ignored. 
-// Field int `properties:"-"` -// -// // Field is assigned value of 'Field'. -// Field int -// -// // Field is assigned value of 'myName'. -// Field int `properties:"myName"` -// -// // Field is assigned value of key 'myName' and has a default -// // value 15 if the key does not exist. -// Field int `properties:"myName,default=15"` -// -// // Field is assigned value of key 'Field' and has a default -// // value 15 if the key does not exist. -// Field int `properties:",default=15"` -// -// // Field is assigned value of key 'date' and the date -// // is in format 2006-01-02 -// Field time.Time `properties:"date,layout=2006-01-02"` -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas. -// Field []string -// -// // Field is assigned the non-empty and whitespace trimmed -// // values of key 'Field' split by commas and has a default -// // value ["a", "b", "c"] if the key does not exist. -// Field []string `properties:",default=a;b;c"` -// -// // Field is decoded recursively with "Field." as key prefix. -// Field SomeStruct -// -// // Field is decoded recursively with "myName." as key prefix. -// Field SomeStruct `properties:"myName"` -// -// // Field is decoded recursively with "Field." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string -// -// // Field is decoded recursively with "myName." as key prefix -// // and the next dotted element of the key as map key. -// Field map[string]string `properties:"myName"` -func (p *Properties) Decode(x interface{}) error { - t, v := reflect.TypeOf(x), reflect.ValueOf(x) - if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct { - return fmt.Errorf("not a pointer to struct: %s", t) - } - if err := dec(p, "", nil, nil, v); err != nil { - return err - } - return nil -} - -func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error { - t := v.Type() - - // value returns the property value for key or the default if provided. - value := func() (string, error) { - if val, ok := p.Get(key); ok { - return val, nil - } - if def != nil { - return *def, nil - } - return "", fmt.Errorf("missing required key %s", key) - } - - // conv converts a string to a value of the given type. - conv := func(s string, t reflect.Type) (val reflect.Value, err error) { - var v interface{} - - switch { - case isDuration(t): - v, err = time.ParseDuration(s) - - case isTime(t): - layout := opts["layout"] - if layout == "" { - layout = time.RFC3339 - } - v, err = time.Parse(layout, s) - - case isBool(t): - v, err = boolVal(s), nil - - case isString(t): - v, err = s, nil - - case isFloat(t): - v, err = strconv.ParseFloat(s, 64) - - case isInt(t): - v, err = strconv.ParseInt(s, 10, 64) - - case isUint(t): - v, err = strconv.ParseUint(s, 10, 64) - - default: - return reflect.Zero(t), fmt.Errorf("unsupported type %s", t) - } - if err != nil { - return reflect.Zero(t), err - } - return reflect.ValueOf(v).Convert(t), nil - } - - // keydef returns the property key and the default value based on the - // name of the struct field and the options in the tag. 
- keydef := func(f reflect.StructField) (string, *string, map[string]string) { - _key, _opts := parseTag(f.Tag.Get("properties")) - - var _def *string - if d, ok := _opts["default"]; ok { - _def = &d - } - if _key != "" { - return _key, _def, _opts - } - return f.Name, _def, _opts - } - - switch { - case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t): - s, err := value() - if err != nil { - return err - } - val, err := conv(s, t) - if err != nil { - return err - } - v.Set(val) - - case isPtr(t): - return dec(p, key, def, opts, v.Elem()) - - case isStruct(t): - for i := 0; i < v.NumField(); i++ { - fv := v.Field(i) - fk, def, opts := keydef(t.Field(i)) - if !fv.CanSet() { - return fmt.Errorf("cannot set %s", t.Field(i).Name) - } - if fk == "-" { - continue - } - if key != "" { - fk = key + "." + fk - } - if err := dec(p, fk, def, opts, fv); err != nil { - return err - } - } - return nil - - case isArray(t): - val, err := value() - if err != nil { - return err - } - vals := split(val, ";") - a := reflect.MakeSlice(t, 0, len(vals)) - for _, s := range vals { - val, err := conv(s, t.Elem()) - if err != nil { - return err - } - a = reflect.Append(a, val) - } - v.Set(a) - - case isMap(t): - valT := t.Elem() - m := reflect.MakeMap(t) - for postfix := range p.FilterStripPrefix(key + ".").m { - pp := strings.SplitN(postfix, ".", 2) - mk, mv := pp[0], reflect.New(valT) - if err := dec(p, key+"."+mk, nil, nil, mv); err != nil { - return err - } - m.SetMapIndex(reflect.ValueOf(mk), mv.Elem()) - } - v.Set(m) - - default: - return fmt.Errorf("unsupported type %s", t) - } - return nil -} - -// split splits a string on sep, trims whitespace of elements -// and omits empty elements -func split(s string, sep string) []string { - var a []string - for _, v := range strings.Split(s, sep) { - if v = strings.TrimSpace(v); v != "" { - a = append(a, v) - } - } - return a -} - -// parseTag parses a "key,k=v,k=v,..." 
-func parseTag(tag string) (key string, opts map[string]string) { - opts = map[string]string{} - for i, s := range strings.Split(tag, ",") { - if i == 0 { - key = s - continue - } - - pp := strings.SplitN(s, "=", 2) - if len(pp) == 1 { - opts[pp[0]] = "" - } else { - opts[pp[0]] = pp[1] - } - } - return key, opts -} - -func isArray(t reflect.Type) bool { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice } -func isBool(t reflect.Type) bool { return t.Kind() == reflect.Bool } -func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) } -func isMap(t reflect.Type) bool { return t.Kind() == reflect.Map } -func isPtr(t reflect.Type) bool { return t.Kind() == reflect.Ptr } -func isString(t reflect.Type) bool { return t.Kind() == reflect.String } -func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } -func isTime(t reflect.Type) bool { return t == reflect.TypeOf(time.Time{}) } -func isFloat(t reflect.Type) bool { - return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 -} -func isInt(t reflect.Type) bool { - return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 -} -func isUint(t reflect.Type) bool { - return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 -} diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go deleted file mode 100644 index 36c836808..000000000 --- a/vendor/github.com/magiconair/properties/doc.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package properties provides functions for reading and writing -// ISO-8859-1 and UTF-8 encoded .properties files and has -// support for recursive property expansion. -// -// Java properties files are ISO-8859-1 encoded and use Unicode -// literals for characters outside the ISO character set. Unicode -// literals can be used in UTF-8 encoded properties files but -// aren't necessary. -// -// To load a single properties file use MustLoadFile(): -// -// p := properties.MustLoadFile(filename, properties.UTF8) -// -// To load multiple properties files use MustLoadFiles() -// which loads the files in the given order and merges the -// result. Missing properties files can be ignored if the -// 'ignoreMissing' flag is set to true. -// -// Filenames can contain environment variables which are expanded -// before loading. -// -// f1 := "/etc/myapp/myapp.conf" -// f2 := "/home/${USER}/myapp.conf" -// p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true) -// -// All of the different key/value delimiters ' ', ':' and '=' are -// supported as well as the comment characters '!' and '#' and -// multi-line values. -// -// ! this is a comment -// # and so is this -// -// # the following expressions are equal -// key value -// key=value -// key:value -// key = value -// key : value -// key = val\ -// ue -// -// Properties stores all comments preceding a key and provides -// GetComments() and SetComments() methods to retrieve and -// update them. The convenience functions GetComment() and -// SetComment() allow access to the last comment. The -// WriteComment() method writes properties files including -// the comments and with the keys in the original order. 
-// This can be used for sanitizing properties files. -// -// Property expansion is recursive and circular references -// and malformed expressions are not allowed and cause an -// error. Expansion of environment variables is supported. -// -// # standard property -// key = value -// -// # property expansion: key2 = value -// key2 = ${key} -// -// # recursive expansion: key3 = value -// key3 = ${key2} -// -// # circular reference (error) -// key = ${key} -// -// # malformed expression (error) -// key = ${ke -// -// # refers to the users' home dir -// home = ${HOME} -// -// # local key takes precendence over env var: u = foo -// USER = foo -// u = ${USER} -// -// The default property expansion format is ${key} but can be -// changed by setting different pre- and postfix values on the -// Properties object. -// -// p := properties.NewProperties() -// p.Prefix = "#[" -// p.Postfix = "]#" -// -// Properties provides convenience functions for getting typed -// values with default values if the key does not exist or the -// type conversion failed. -// -// # Returns true if the value is either "1", "on", "yes" or "true" -// # Returns false for every other value and the default value if -// # the key does not exist. -// v = p.GetBool("key", false) -// -// # Returns the value if the key exists and the format conversion -// # was successful. Otherwise, the default value is returned. -// v = p.GetInt64("key", 999) -// v = p.GetUint64("key", 999) -// v = p.GetFloat64("key", 123.0) -// v = p.GetString("key", "def") -// v = p.GetDuration("key", 999) -// -// As an alterantive properties may be applied with the standard -// library's flag implementation at any time. -// -// # Standard configuration -// v = flag.Int("key", 999, "help message") -// flag.Parse() -// -// # Merge p into the flag set -// p.MustFlag(flag.CommandLine) -// -// Properties provides several MustXXX() convenience functions -// which will terminate the app if an error occurs. The behavior -// of the failure is configurable and the default is to call -// log.Fatal(err). To have the MustXXX() functions panic instead -// of logging the error set a different ErrorHandler before -// you use the Properties package. -// -// properties.ErrorHandler = properties.PanicHandler -// -// # Will panic instead of logging an error -// p := properties.MustLoadFile("config.properties") -// -// You can also provide your own ErrorHandler function. The only requirement -// is that the error handler function must exit after handling the error. -// -// properties.ErrorHandler = func(err error) { -// fmt.Println(err) -// os.Exit(1) -// } -// -// # Will write to stdout and then exit -// p := properties.MustLoadFile("config.properties") -// -// Properties can also be loaded into a struct via the `Decode` -// method, e.g. -// -// type S struct { -// A string `properties:"a,default=foo"` -// D time.Duration `properties:"timeout,default=5s"` -// E time.Time `properties:"expires,layout=2006-01-02,default=2015-01-01"` -// } -// -// See `Decode()` method for the full documentation. -// -// The following documents provide a description of the properties -// file format. 
-// -// http://en.wikipedia.org/wiki/.properties -// -// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29 -// -package properties diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go deleted file mode 100644 index 0d775e035..000000000 --- a/vendor/github.com/magiconair/properties/integrate.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import "flag" - -// MustFlag sets flags that are skipped by dst.Parse when p contains -// the respective key for flag.Flag.Name. -// -// It's use is recommended with command line arguments as in: -// flag.Parse() -// p.MustFlag(flag.CommandLine) -func (p *Properties) MustFlag(dst *flag.FlagSet) { - m := make(map[string]*flag.Flag) - dst.VisitAll(func(f *flag.Flag) { - m[f.Name] = f - }) - dst.Visit(func(f *flag.Flag) { - delete(m, f.Name) // overridden - }) - - for name, f := range m { - v, ok := p.Get(name) - if !ok { - continue - } - - if err := f.Value.Set(v); err != nil { - ErrorHandler(err) - } - } -} diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go deleted file mode 100644 index c63fcc60d..000000000 --- a/vendor/github.com/magiconair/properties/lex.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// Parts of the lexer are from the template/text/parser package -// For these parts the following applies: -// -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file of the go 1.2 -// distribution. - -package properties - -import ( - "fmt" - "strconv" - "strings" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos int // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemEOF - itemKey // a key - itemValue // a value - itemComment // a comment -) - -// defines a constant for EOF -const eof = -1 - -// permitted whitespace characters space, FF and TAB -const whitespace = " \f\t" - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - input string // the string being scanned - state stateFn // the next lexing function to enter - pos int // current position in the input - start int // start position of this item - width int // width of last rune read from input - lastPos int // position of most recent item returned by nextItem - runes []rune // scanned runes for this item - items chan item // channel of scanned items -} - -// next returns the next rune in the input. 
-func (l *lexer) next() rune { - if l.pos >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = w - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - i := item{t, l.start, string(l.runes)} - l.items <- i - l.start = l.pos - l.runes = l.runes[:0] -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// appends the rune to the current value -func (l *lexer) appendRune(r rune) { - l.runes = append(l.runes, r) -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.ContainsRune(valid, l.next()) { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.ContainsRune(valid, l.next()) { - } - l.backup() -} - -// acceptRunUntil consumes a run of runes up to a terminator. -func (l *lexer) acceptRunUntil(term rune) { - for term != l.next() { - } - l.backup() -} - -// hasText returns true if the current parsed text is not empty. -func (l *lexer) isNotEmpty() bool { - return l.pos > l.start -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. -func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - i := <-l.items - l.lastPos = i.pos - return i -} - -// lex creates a new scanner for the input string. -func lex(input string) *lexer { - l := &lexer{ - input: input, - items: make(chan item), - runes: make([]rune, 0, 32), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexBeforeKey(l); l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -// lexBeforeKey scans until a key begins. -func lexBeforeKey(l *lexer) stateFn { - switch r := l.next(); { - case isEOF(r): - l.emit(itemEOF) - return nil - - case isEOL(r): - l.ignore() - return lexBeforeKey - - case isComment(r): - return lexComment - - case isWhitespace(r): - l.ignore() - return lexBeforeKey - - default: - l.backup() - return lexKey - } -} - -// lexComment scans a comment line. The comment character has already been scanned. 
-func lexComment(l *lexer) stateFn { - l.acceptRun(whitespace) - l.ignore() - for { - switch r := l.next(); { - case isEOF(r): - l.ignore() - l.emit(itemEOF) - return nil - case isEOL(r): - l.emit(itemComment) - return lexBeforeKey - default: - l.appendRune(r) - } - } -} - -// lexKey scans the key up to a delimiter -func lexKey(l *lexer) stateFn { - var r rune - -Loop: - for { - switch r = l.next(); { - - case isEscape(r): - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - - case isEndOfKey(r): - l.backup() - break Loop - - case isEOF(r): - break Loop - - default: - l.appendRune(r) - } - } - - if len(l.runes) > 0 { - l.emit(itemKey) - } - - if isEOF(r) { - l.emit(itemEOF) - return nil - } - - return lexBeforeValue -} - -// lexBeforeValue scans the delimiter between key and value. -// Leading and trailing whitespace is ignored. -// We expect to be just after the key. -func lexBeforeValue(l *lexer) stateFn { - l.acceptRun(whitespace) - l.accept(":=") - l.acceptRun(whitespace) - l.ignore() - return lexValue -} - -// lexValue scans text until the end of the line. We expect to be just after the delimiter. -func lexValue(l *lexer) stateFn { - for { - switch r := l.next(); { - case isEscape(r): - if isEOL(l.peek()) { - l.next() - l.acceptRun(whitespace) - } else { - err := l.scanEscapeSequence() - if err != nil { - return l.errorf(err.Error()) - } - } - - case isEOL(r): - l.emit(itemValue) - l.ignore() - return lexBeforeKey - - case isEOF(r): - l.emit(itemValue) - l.emit(itemEOF) - return nil - - default: - l.appendRune(r) - } - } -} - -// scanEscapeSequence scans either one of the escaped characters -// or a unicode literal. We expect to be after the escape character. -func (l *lexer) scanEscapeSequence() error { - switch r := l.next(); { - - case isEscapedCharacter(r): - l.appendRune(decodeEscapedCharacter(r)) - return nil - - case atUnicodeLiteral(r): - return l.scanUnicodeLiteral() - - case isEOF(r): - return fmt.Errorf("premature EOF") - - // silently drop the escape character and append the rune as is - default: - l.appendRune(r) - return nil - } -} - -// scans a unicode literal in the form \uXXXX. We expect to be after the \u. -func (l *lexer) scanUnicodeLiteral() error { - // scan the digits - d := make([]rune, 4) - for i := 0; i < 4; i++ { - d[i] = l.next() - if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) { - return fmt.Errorf("invalid unicode literal") - } - } - - // decode the digits into a rune - r, err := strconv.ParseInt(string(d), 16, 0) - if err != nil { - return err - } - - l.appendRune(rune(r)) - return nil -} - -// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character. -func decodeEscapedCharacter(r rune) rune { - switch r { - case 'f': - return '\f' - case 'n': - return '\n' - case 'r': - return '\r' - case 't': - return '\t' - default: - return r - } -} - -// atUnicodeLiteral reports whether we are at a unicode literal. -// The escape character has already been consumed. -func atUnicodeLiteral(r rune) bool { - return r == 'u' -} - -// isComment reports whether we are at the start of a comment. -func isComment(r rune) bool { - return r == '#' || r == '!' -} - -// isEndOfKey reports whether the rune terminates the current key. -func isEndOfKey(r rune) bool { - return strings.ContainsRune(" \f\t\r\n:=", r) -} - -// isEOF reports whether we are at EOF. -func isEOF(r rune) bool { - return r == eof -} - -// isEOL reports whether we are at a new line character. 
-func isEOL(r rune) bool { - return r == '\n' || r == '\r' -} - -// isEscape reports whether the rune is the escape character which -// prefixes unicode literals and other escaped characters. -func isEscape(r rune) bool { - return r == '\\' -} - -// isEscapedCharacter reports whether we are at one of the characters that need escaping. -// The escape character has already been consumed. -func isEscapedCharacter(r rune) bool { - return strings.ContainsRune(" :=fnrt", r) -} - -// isWhitespace reports whether the rune is a whitespace character. -func isWhitespace(r rune) bool { - return strings.ContainsRune(whitespace, r) -} diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go deleted file mode 100644 index 9c83fd638..000000000 --- a/vendor/github.com/magiconair/properties/load.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "strings" -) - -// Encoding specifies encoding of the input data. -type Encoding uint - -const ( - // UTF8 interprets the input data as UTF-8. - UTF8 Encoding = 1 << iota - - // ISO_8859_1 interprets the input data as ISO-8859-1. - ISO_8859_1 -) - -// Load reads a buffer into a Properties struct. -func Load(buf []byte, enc Encoding) (*Properties, error) { - return loadBuf(buf, enc) -} - -// LoadString reads an UTF8 string into a properties struct. -func LoadString(s string) (*Properties, error) { - return loadBuf([]byte(s), UTF8) -} - -// LoadMap creates a new Properties struct from a string map. -func LoadMap(m map[string]string) *Properties { - p := NewProperties() - for k, v := range m { - p.Set(k, v) - } - return p -} - -// LoadFile reads a file into a Properties struct. -func LoadFile(filename string, enc Encoding) (*Properties, error) { - return loadAll([]string{filename}, enc, false) -} - -// LoadFiles reads multiple files in the given order into -// a Properties struct. If 'ignoreMissing' is true then -// non-existent files will not be reported as error. -func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(filenames, enc, ignoreMissing) -} - -// LoadURL reads the content of the URL into a Properties struct. -// -// The encoding is determined via the Content-Type header which -// should be set to 'text/plain'. If the 'charset' parameter is -// missing, 'iso-8859-1' or 'latin1' the encoding is set to -// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the -// encoding is set to UTF-8. A missing content type header is -// interpreted as 'text/plain; charset=utf-8'. -func LoadURL(url string) (*Properties, error) { - return loadAll([]string{url}, UTF8, false) -} - -// LoadURLs reads the content of multiple URLs in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code will -// not be reported as error. See LoadURL for the Content-Type header -// and the encoding. -func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) { - return loadAll(urls, UTF8, ignoreMissing) -} - -// LoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. 
For the URLs please see -// LoadURL for the Content-Type header and the encoding. -func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - return loadAll(names, enc, ignoreMissing) -} - -// MustLoadString reads an UTF8 string into a Properties struct and -// panics on error. -func MustLoadString(s string) *Properties { - return must(LoadString(s)) -} - -// MustLoadFile reads a file into a Properties struct and -// panics on error. -func MustLoadFile(filename string, enc Encoding) *Properties { - return must(LoadFile(filename, enc)) -} - -// MustLoadFiles reads multiple files in the given order into -// a Properties struct and panics on error. If 'ignoreMissing' -// is true then non-existent files will not be reported as error. -func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadFiles(filenames, enc, ignoreMissing)) -} - -// MustLoadURL reads the content of a URL into a Properties struct and -// panics on error. -func MustLoadURL(url string) *Properties { - return must(LoadURL(url)) -} - -// MustLoadURLs reads the content of multiple URLs in the given order into a -// Properties struct and panics on error. If 'ignoreMissing' is true then a 404 -// status code will not be reported as error. -func MustLoadURLs(urls []string, ignoreMissing bool) *Properties { - return must(LoadURLs(urls, ignoreMissing)) -} - -// MustLoadAll reads the content of multiple URLs or files in the given order into a -// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will -// not be reported as error. Encoding sets the encoding for files. For the URLs please see -// LoadURL for the Content-Type header and the encoding. It panics on error. -func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties { - return must(LoadAll(names, enc, ignoreMissing)) -} - -func loadBuf(buf []byte, enc Encoding) (*Properties, error) { - p, err := parse(convert(buf, enc)) - if err != nil { - return nil, err - } - return p, p.check() -} - -func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) { - result := NewProperties() - for _, name := range names { - n, err := expandName(name) - if err != nil { - return nil, err - } - var p *Properties - if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") { - p, err = loadURL(n, ignoreMissing) - } else { - p, err = loadFile(n, enc, ignoreMissing) - } - if err != nil { - return nil, err - } - result.Merge(p) - - } - return result, result.check() -} - -func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if ignoreMissing && os.IsNotExist(err) { - LogPrintf("properties: %s not found. skipping", filename) - return NewProperties(), nil - } - return nil, err - } - p, err := parse(convert(data, enc)) - if err != nil { - return nil, err - } - return p, nil -} - -func loadURL(url string, ignoreMissing bool) (*Properties, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("properties: error fetching %q. %s", url, err) - } - if resp.StatusCode == 404 && ignoreMissing { - LogPrintf("properties: %s returned %d. 
skipping", url, resp.StatusCode) - return NewProperties(), nil - } - if resp.StatusCode != 200 { - return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - if err = resp.Body.Close(); err != nil { - return nil, fmt.Errorf("properties: %s error reading response. %s", url, err) - } - - ct := resp.Header.Get("Content-Type") - var enc Encoding - switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": - enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": - enc = UTF8 - default: - return nil, fmt.Errorf("properties: invalid content type %s", ct) - } - - p, err := parse(convert(body, enc)) - if err != nil { - return nil, err - } - return p, nil -} - -func must(p *Properties, err error) *Properties { - if err != nil { - ErrorHandler(err) - } - return p -} - -// expandName expands ${ENV_VAR} expressions in a name. -// If the environment variable does not exist then it will be replaced -// with an empty string. Malformed expressions like "${ENV_VAR" will -// be reported as error. -func expandName(name string) (string, error) { - return expand(name, []string{}, "${", "}", make(map[string]string)) -} - -// Interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string. -// For ISO-8859-1 we can convert each byte straight into a rune since the -// first 256 unicode code points cover ISO-8859-1. -func convert(buf []byte, enc Encoding) string { - switch enc { - case UTF8: - return string(buf) - case ISO_8859_1: - runes := make([]rune, len(buf)) - for i, b := range buf { - runes[i] = rune(b) - } - return string(runes) - default: - ErrorHandler(fmt.Errorf("unsupported encoding %v", enc)) - } - panic("ErrorHandler should exit") -} diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go deleted file mode 100644 index 90f555cb9..000000000 --- a/vendor/github.com/magiconair/properties/parser.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package properties - -import ( - "fmt" - "runtime" -) - -type parser struct { - lex *lexer -} - -func parse(input string) (properties *Properties, err error) { - p := &parser{lex: lex(input)} - defer p.recover(&err) - - properties = NewProperties() - key := "" - comments := []string{} - - for { - token := p.expectOneOf(itemComment, itemKey, itemEOF) - switch token.typ { - case itemEOF: - goto done - case itemComment: - comments = append(comments, token.val) - continue - case itemKey: - key = token.val - if _, ok := properties.m[key]; !ok { - properties.k = append(properties.k, key) - } - } - - token = p.expectOneOf(itemValue, itemEOF) - if len(comments) > 0 { - properties.c[key] = comments - comments = []string{} - } - switch token.typ { - case itemEOF: - properties.m[key] = "" - goto done - case itemValue: - properties.m[key] = token.val - } - } - -done: - return properties, nil -} - -func (p *parser) errorf(format string, args ...interface{}) { - format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -func (p *parser) expect(expected itemType) (token item) { - token = p.lex.nextItem() - if token.typ != expected { - p.unexpected(token) - } - return token -} - -func (p *parser) expectOneOf(expected ...itemType) (token item) { - token = p.lex.nextItem() - for _, v := range expected { - if token.typ == v { - return token - } - } - p.unexpected(token) - panic("unexpected token") -} - -func (p *parser) unexpected(token item) { - p.errorf(token.String()) -} - -// recover is the handler that turns panics into returns from the top level of Parse. -func (p *parser) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - *errp = e.(error) - } - return -} diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go deleted file mode 100644 index 53f5b2ff0..000000000 --- a/vendor/github.com/magiconair/properties/properties.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer. -// BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. - -import ( - "fmt" - "io" - "log" - "os" - "regexp" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -const maxExpansionDepth = 64 - -// ErrorHandlerFunc defines the type of function which handles failures -// of the MustXXX() functions. An error handler function must exit -// the application after handling the error. -type ErrorHandlerFunc func(error) - -// ErrorHandler is the function which handles failures of the MustXXX() -// functions. The default is LogFatalHandler. -var ErrorHandler ErrorHandlerFunc = LogFatalHandler - -// LogHandlerFunc defines the function prototype for logging errors. -type LogHandlerFunc func(fmt string, args ...interface{}) - -// LogPrintf defines a log handler which uses log.Printf. -var LogPrintf LogHandlerFunc = log.Printf - -// LogFatalHandler handles the error by logging a fatal error and exiting. -func LogFatalHandler(err error) { - log.Fatal(err) -} - -// PanicHandler handles the error by panicking. 
-func PanicHandler(err error) { - panic(err) -} - -// ----------------------------------------------------------------------------- - -// A Properties contains the key/value pairs from the properties input. -// All values are stored in unexpanded form and are expanded at runtime -type Properties struct { - // Pre-/Postfix for property expansion. - Prefix string - Postfix string - - // DisableExpansion controls the expansion of properties on Get() - // and the check for circular references on Set(). When set to - // true Properties behaves like a simple key/value store and does - // not check for circular references on Get() or on Set(). - DisableExpansion bool - - // Stores the key/value pairs - m map[string]string - - // Stores the comments per key. - c map[string][]string - - // Stores the keys in order of appearance. - k []string -} - -// NewProperties creates a new Properties struct with the default -// configuration for "${key}" expressions. -func NewProperties() *Properties { - return &Properties{ - Prefix: "${", - Postfix: "}", - m: map[string]string{}, - c: map[string][]string{}, - k: []string{}, - } -} - -// Get returns the expanded value for the given key if exists. -// Otherwise, ok is false. -func (p *Properties) Get(key string) (value string, ok bool) { - v, ok := p.m[key] - if p.DisableExpansion { - return v, ok - } - if !ok { - return "", false - } - - expanded, err := p.expand(key, v) - - // we guarantee that the expanded value is free of - // circular references and malformed expressions - // so we panic if we still get an error here. - if err != nil { - ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) - } - - return expanded, true -} - -// MustGet returns the expanded value for the given key if exists. -// Otherwise, it panics. -func (p *Properties) MustGet(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// ClearComments removes the comments for all keys. -func (p *Properties) ClearComments() { - p.c = map[string][]string{} -} - -// ---------------------------------------------------------------------------- - -// GetComment returns the last comment before the given key or an empty string. -func (p *Properties) GetComment(key string) string { - comments, ok := p.c[key] - if !ok || len(comments) == 0 { - return "" - } - return comments[len(comments)-1] -} - -// ---------------------------------------------------------------------------- - -// GetComments returns all comments that appeared before the given key or nil. -func (p *Properties) GetComments(key string) []string { - if comments, ok := p.c[key]; ok { - return comments - } - return nil -} - -// ---------------------------------------------------------------------------- - -// SetComment sets the comment for the key. -func (p *Properties) SetComment(key, comment string) { - p.c[key] = []string{comment} -} - -// ---------------------------------------------------------------------------- - -// SetComments sets the comments for the key. If the comments are nil then -// all comments for this key are deleted. -func (p *Properties) SetComments(key string, comments []string) { - if comments == nil { - delete(p.c, key) - return - } - p.c[key] = comments -} - -// ---------------------------------------------------------------------------- - -// GetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. 
The comparison is case-insensitive. -// If the key does not exist the default value is returned. -func (p *Properties) GetBool(key string, def bool) bool { - v, err := p.getBool(key) - if err != nil { - return def - } - return v -} - -// MustGetBool checks if the expanded value is one of '1', 'yes', -// 'true' or 'on' if the key exists. The comparison is case-insensitive. -// If the key does not exist the function panics. -func (p *Properties) MustGetBool(key string) bool { - v, err := p.getBool(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getBool(key string) (value bool, err error) { - if v, ok := p.Get(key); ok { - return boolVal(v), nil - } - return false, invalidKeyError(key) -} - -func boolVal(v string) bool { - v = strings.ToLower(v) - return v == "1" || v == "true" || v == "yes" || v == "on" -} - -// ---------------------------------------------------------------------------- - -// GetDuration parses the expanded value as an time.Duration (in ns) if the -// key exists. If key does not exist or the value cannot be parsed the default -// value is returned. In almost all cases you want to use GetParsedDuration(). -func (p *Properties) GetDuration(key string, def time.Duration) time.Duration { - v, err := p.getInt64(key) - if err != nil { - return def - } - return time.Duration(v) -} - -// MustGetDuration parses the expanded value as an time.Duration (in ns) if -// the key exists. If key does not exist or the value cannot be parsed the -// function panics. In almost all cases you want to use MustGetParsedDuration(). -func (p *Properties) MustGetDuration(key string) time.Duration { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return time.Duration(v) -} - -// ---------------------------------------------------------------------------- - -// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration { - s, ok := p.Get(key) - if !ok { - return def - } - v, err := time.ParseDuration(s) - if err != nil { - return def - } - return v -} - -// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetParsedDuration(key string) time.Duration { - s, ok := p.Get(key) - if !ok { - ErrorHandler(invalidKeyError(key)) - } - v, err := time.ParseDuration(s) - if err != nil { - ErrorHandler(err) - } - return v -} - -// ---------------------------------------------------------------------------- - -// GetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetFloat64(key string, def float64) float64 { - v, err := p.getFloat64(key) - if err != nil { - return def - } - return v -} - -// MustGetFloat64 parses the expanded value as a float64 if the key exists. -// If key does not exist or the value cannot be parsed the function panics. 
-func (p *Properties) MustGetFloat64(key string) float64 { - v, err := p.getFloat64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getFloat64(key string) (value float64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseFloat(v, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetInt(key string, def int) int { - v, err := p.getInt64(key) - if err != nil { - return def - } - return intRangeCheck(key, v) -} - -// MustGetInt parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetInt(key string) int { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return intRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetInt64 parses the expanded value as an int64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetInt64(key string, def int64) int64 { - v, err := p.getInt64(key) - if err != nil { - return def - } - return v -} - -// MustGetInt64 parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetInt64(key string) int64 { - v, err := p.getInt64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getInt64(key string) (value int64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetUint parses the expanded value as an uint if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. If the value does not fit into an int the -// function panics with an out of range error. -func (p *Properties) GetUint(key string, def uint) uint { - v, err := p.getUint64(key) - if err != nil { - return def - } - return uintRangeCheck(key, v) -} - -// MustGetUint parses the expanded value as an int if the key exists. -// If key does not exist or the value cannot be parsed the function panics. -// If the value does not fit into an int the function panics with -// an out of range error. -func (p *Properties) MustGetUint(key string) uint { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return uintRangeCheck(key, v) -} - -// ---------------------------------------------------------------------------- - -// GetUint64 parses the expanded value as an uint64 if the key exists. -// If key does not exist or the value cannot be parsed the default -// value is returned. -func (p *Properties) GetUint64(key string, def uint64) uint64 { - v, err := p.getUint64(key) - if err != nil { - return def - } - return v -} - -// MustGetUint64 parses the expanded value as an int if the key exists. 
-// If key does not exist or the value cannot be parsed the function panics. -func (p *Properties) MustGetUint64(key string) uint64 { - v, err := p.getUint64(key) - if err != nil { - ErrorHandler(err) - } - return v -} - -func (p *Properties) getUint64(key string) (value uint64, err error) { - if v, ok := p.Get(key); ok { - value, err = strconv.ParseUint(v, 10, 64) - if err != nil { - return 0, err - } - return value, nil - } - return 0, invalidKeyError(key) -} - -// ---------------------------------------------------------------------------- - -// GetString returns the expanded value for the given key if exists or -// the default value otherwise. -func (p *Properties) GetString(key, def string) string { - if v, ok := p.Get(key); ok { - return v - } - return def -} - -// MustGetString returns the expanded value for the given key if exists or -// panics otherwise. -func (p *Properties) MustGetString(key string) string { - if v, ok := p.Get(key); ok { - return v - } - ErrorHandler(invalidKeyError(key)) - panic("ErrorHandler should exit") -} - -// ---------------------------------------------------------------------------- - -// Filter returns a new properties object which contains all properties -// for which the key matches the pattern. -func (p *Properties) Filter(pattern string) (*Properties, error) { - re, err := regexp.Compile(pattern) - if err != nil { - return nil, err - } - - return p.FilterRegexp(re), nil -} - -// FilterRegexp returns a new properties object which contains all properties -// for which the key matches the regular expression. -func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties { - pp := NewProperties() - for _, k := range p.k { - if re.MatchString(k) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterPrefix returns a new properties object with a subset of all keys -// with the given prefix. -func (p *Properties) FilterPrefix(prefix string) *Properties { - pp := NewProperties() - for _, k := range p.k { - if strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed) - pp.Set(k, p.m[k]) - } - } - return pp -} - -// FilterStripPrefix returns a new properties object with a subset of all keys -// with the given prefix and the prefix removed from the keys. -func (p *Properties) FilterStripPrefix(prefix string) *Properties { - pp := NewProperties() - n := len(prefix) - for _, k := range p.k { - if len(k) > len(prefix) && strings.HasPrefix(k, prefix) { - // TODO(fs): we are ignoring the error which flags a circular reference. - // TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference - // TODO(fs): this function should probably return an error but the signature is fixed - pp.Set(k[n:], p.m[k]) - } - } - return pp -} - -// Len returns the number of keys. -func (p *Properties) Len() int { - return len(p.m) -} - -// Keys returns all keys in the same order as in the input. -func (p *Properties) Keys() []string { - keys := make([]string, len(p.k)) - copy(keys, p.k) - return keys -} - -// Set sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. 
If the value contains a -// circular reference or a malformed expression then -// an error is returned. -// An empty key is silently ignored. -func (p *Properties) Set(key, value string) (prev string, ok bool, err error) { - if key == "" { - return "", false, nil - } - - // if expansion is disabled we allow circular references - if p.DisableExpansion { - prev, ok = p.Get(key) - p.m[key] = value - if !ok { - p.k = append(p.k, key) - } - return prev, ok, nil - } - - // to check for a circular reference we temporarily need - // to set the new value. If there is an error then revert - // to the previous state. Only if all tests are successful - // then we add the key to the p.k list. - prev, ok = p.Get(key) - p.m[key] = value - - // now check for a circular reference - _, err = p.expand(key, value) - if err != nil { - - // revert to the previous state - if ok { - p.m[key] = prev - } else { - delete(p.m, key) - } - - return "", false, err - } - - if !ok { - p.k = append(p.k, key) - } - - return prev, ok, nil -} - -// SetValue sets property key to the default string value -// as defined by fmt.Sprintf("%v"). -func (p *Properties) SetValue(key string, value interface{}) error { - _, _, err := p.Set(key, fmt.Sprintf("%v", value)) - return err -} - -// MustSet sets the property key to the corresponding value. -// If a value for key existed before then ok is true and prev -// contains the previous value. An empty key is silently ignored. -func (p *Properties) MustSet(key, value string) (prev string, ok bool) { - prev, ok, err := p.Set(key, value) - if err != nil { - ErrorHandler(err) - } - return prev, ok -} - -// String returns a string of all expanded 'key = value' pairs. -func (p *Properties) String() string { - var s string - for _, key := range p.k { - value, _ := p.Get(key) - s = fmt.Sprintf("%s%s = %s\n", s, key, value) - } - return s -} - -// Write writes all unexpanded 'key = value' pairs to the given writer. -// Write returns the number of bytes written and any write error encountered. -func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { - return p.WriteComment(w, "", enc) -} - -// WriteComment writes all unexpanced 'key = value' pairs to the given writer. -// If prefix is not empty then comments are written with a blank line and the -// given prefix. The prefix should be either "# " or "! " to be compatible with -// the properties file format. Otherwise, the properties parser will not be -// able to read the file back in. It returns the number of bytes written and -// any write error encountered. -func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) { - var x int - - for _, key := range p.k { - value := p.m[key] - - if prefix != "" { - if comments, ok := p.c[key]; ok { - // don't print comments if they are all empty - allEmpty := true - for _, c := range comments { - if c != "" { - allEmpty = false - break - } - } - - if !allEmpty { - // add a blank line between entries but not at the top - if len(comments) > 0 && n > 0 { - x, err = fmt.Fprintln(w) - if err != nil { - return - } - n += x - } - - for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) - if err != nil { - return - } - n += x - } - } - } - } - - x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) - if err != nil { - return - } - n += x - } - return -} - -// Map returns a copy of the properties as a map. 
-func (p *Properties) Map() map[string]string { - m := make(map[string]string) - for k, v := range p.m { - m[k] = v - } - return m -} - -// FilterFunc returns a copy of the properties which includes the values which passed all filters. -func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties { - pp := NewProperties() -outer: - for k, v := range p.m { - for _, f := range filters { - if !f(k, v) { - continue outer - } - pp.Set(k, v) - } - } - return pp -} - -// ---------------------------------------------------------------------------- - -// Delete removes the key and its comments. -func (p *Properties) Delete(key string) { - delete(p.m, key) - delete(p.c, key) - newKeys := []string{} - for _, k := range p.k { - if k != key { - newKeys = append(newKeys, k) - } - } - p.k = newKeys -} - -// Merge merges properties, comments and keys from other *Properties into p -func (p *Properties) Merge(other *Properties) { - for k, v := range other.m { - p.m[k] = v - } - for k, v := range other.c { - p.c[k] = v - } - -outer: - for _, otherKey := range other.k { - for _, key := range p.k { - if otherKey == key { - continue outer - } - } - p.k = append(p.k, otherKey) - } -} - -// ---------------------------------------------------------------------------- - -// check expands all values and returns an error if a circular reference or -// a malformed expression was found. -func (p *Properties) check() error { - for key, value := range p.m { - if _, err := p.expand(key, value); err != nil { - return err - } - } - return nil -} - -func (p *Properties) expand(key, input string) (string, error) { - // no pre/postfix -> nothing to expand - if p.Prefix == "" && p.Postfix == "" { - return input, nil - } - - return expand(input, []string{key}, p.Prefix, p.Postfix, p.m) -} - -// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values. -// The function keeps track of the keys that were already expanded and stops if it -// detects a circular reference or a malformed expression of the form '(prefix)key'. -func expand(s string, keys []string, prefix, postfix string, values map[string]string) (string, error) { - if len(keys) > maxExpansionDepth { - return "", fmt.Errorf("expansion too deep") - } - - for { - start := strings.Index(s, prefix) - if start == -1 { - return s, nil - } - - keyStart := start + len(prefix) - keyLen := strings.Index(s[keyStart:], postfix) - if keyLen == -1 { - return "", fmt.Errorf("malformed expression") - } - - end := keyStart + keyLen + len(postfix) - 1 - key := s[keyStart : keyStart+keyLen] - - // fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key) - - for _, k := range keys { - if key == k { - return "", fmt.Errorf("circular reference") - } - } - - val, ok := values[key] - if !ok { - val = os.Getenv(key) - } - new_val, err := expand(val, append(keys, key), prefix, postfix, values) - if err != nil { - return "", err - } - s = s[:start] + new_val + s[end+1:] - } - return s, nil -} - -// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters. 
-func encode(s string, special string, enc Encoding) string { - switch enc { - case UTF8: - return encodeUtf8(s, special) - case ISO_8859_1: - return encodeIso(s, special) - default: - panic(fmt.Sprintf("unsupported encoding %v", enc)) - } -} - -func encodeUtf8(s string, special string) string { - v := "" - for pos := 0; pos < len(s); { - r, w := utf8.DecodeRuneInString(s[pos:]) - pos += w - v += escape(r, special) - } - return v -} - -func encodeIso(s string, special string) string { - var r rune - var w int - var v string - for pos := 0; pos < len(s); { - switch r, w = utf8.DecodeRuneInString(s[pos:]); { - case r < 1<<8: // single byte rune -> escape special chars only - v += escape(r, special) - case r < 1<<16: // two byte rune -> unicode literal - v += fmt.Sprintf("\\u%04x", r) - default: // more than two bytes per rune -> can't encode - v += "?" - } - pos += w - } - return v -} - -func escape(r rune, special string) string { - switch r { - case '\f': - return "\\f" - case '\n': - return "\\n" - case '\r': - return "\\r" - case '\t': - return "\\t" - default: - if strings.ContainsRune(special, r) { - return "\\" + string(r) - } - return string(r) - } -} - -func invalidKeyError(key string) error { - return fmt.Errorf("unknown property: %s", key) -} diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go deleted file mode 100644 index 2e907d540..000000000 --- a/vendor/github.com/magiconair/properties/rangecheck.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2017 Frank Schroeder. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package properties - -import ( - "fmt" - "math" -) - -// make this a var to overwrite it in a test -var is32Bit = ^uint(0) == math.MaxUint32 - -// intRangeCheck checks if the value fits into the int type and -// panics if it does not. -func intRangeCheck(key string, v int64) int { - if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return int(v) -} - -// uintRangeCheck checks if the value fits into the uint type and -// panics if it does not. -func uintRangeCheck(key string, v uint64) uint { - if is32Bit && v > math.MaxUint32 { - panic(fmt.Sprintf("Value %d for key %s out of range", v, key)) - } - return uint(v) -} diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE deleted file mode 100644 index 583bdae62..000000000 --- a/vendor/github.com/pelletier/go-toml/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go deleted file mode 100644 index d5fd98c02..000000000 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Package toml is a TOML parser and manipulation library. -// -// This version supports the specification as described in -// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md -// -// Marshaling -// -// Go-toml can marshal and unmarshal TOML documents from and to data -// structures. -// -// TOML document as a tree -// -// Go-toml can operate on a TOML document as a tree. Use one of the Load* -// functions to parse TOML data and obtain a Tree instance, then one of its -// methods to manipulate the tree. -// -// JSONPath-like queries -// -// The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a -// single expression. See the package documentation for more information. -// -package toml diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d3..000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go deleted file mode 100644 index 284db6467..000000000 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ /dev/null @@ -1,85 +0,0 @@ -// Parsing keys handling both bare and quoted keys. - -package toml - -import ( - "bytes" - "errors" - "fmt" - "unicode" -) - -// Convert the bare key group string to an array. -// The input supports double quotation to allow "." inside the key name, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
-func parseKey(key string) ([]string, error) { - groups := []string{} - var buffer bytes.Buffer - inQuotes := false - wasInQuotes := false - ignoreSpace := true - expectDot := false - - for _, char := range key { - if ignoreSpace { - if char == ' ' { - continue - } - ignoreSpace = false - } - switch char { - case '"': - if inQuotes { - groups = append(groups, buffer.String()) - buffer.Reset() - wasInQuotes = true - } - inQuotes = !inQuotes - expectDot = false - case '.': - if inQuotes { - buffer.WriteRune(char) - } else { - if !wasInQuotes { - if buffer.Len() == 0 { - return nil, errors.New("empty table key") - } - groups = append(groups, buffer.String()) - buffer.Reset() - } - ignoreSpace = true - expectDot = false - wasInQuotes = false - } - case ' ': - if inQuotes { - buffer.WriteRune(char) - } else { - expectDot = true - } - default: - if !inQuotes && !isValidBareChar(char) { - return nil, fmt.Errorf("invalid bare character: %c", char) - } - if !inQuotes && expectDot { - return nil, errors.New("what?") - } - buffer.WriteRune(char) - expectDot = false - } - } - if inQuotes { - return nil, errors.New("mismatched quotes") - } - if buffer.Len() > 0 { - groups = append(groups, buffer.String()) - } - if len(groups) == 0 { - return nil, errors.New("empty key") - } - return groups, nil -} - -func isValidBareChar(r rune) bool { - return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r) -} diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go deleted file mode 100644 index d11de4285..000000000 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ /dev/null @@ -1,750 +0,0 @@ -// TOML lexer. -// -// Written using the principles developed by Rob Pike in -// http://www.youtube.com/watch?v=HxaD_trXwRE - -package toml - -import ( - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var dateRegexp *regexp.Regexp - -// Define state functions -type tomlLexStateFn func() tomlLexStateFn - -// Define lexer -type tomlLexer struct { - inputIdx int - input []rune // Textual source - currentTokenStart int - currentTokenStop int - tokens []token - depth int - line int - col int - endbufferLine int - endbufferCol int -} - -// Basic read operations on input - -func (l *tomlLexer) read() rune { - r := l.peek() - if r == '\n' { - l.endbufferLine++ - l.endbufferCol = 1 - } else { - l.endbufferCol++ - } - l.inputIdx++ - return r -} - -func (l *tomlLexer) next() rune { - r := l.read() - - if r != eof { - l.currentTokenStop++ - } - return r -} - -func (l *tomlLexer) ignore() { - l.currentTokenStart = l.currentTokenStop - l.line = l.endbufferLine - l.col = l.endbufferCol -} - -func (l *tomlLexer) skip() { - l.next() - l.ignore() -} - -func (l *tomlLexer) fastForward(n int) { - for i := 0; i < n; i++ { - l.next() - } -} - -func (l *tomlLexer) emitWithValue(t tokenType, value string) { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: t, - val: value, - }) - l.ignore() -} - -func (l *tomlLexer) emit(t tokenType) { - l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop])) -} - -func (l *tomlLexer) peek() rune { - if l.inputIdx >= len(l.input) { - return eof - } - return l.input[l.inputIdx] -} - -func (l *tomlLexer) peekString(size int) string { - maxIdx := len(l.input) - upperIdx := l.inputIdx + size // FIXME: potential overflow - if upperIdx > maxIdx { - upperIdx = maxIdx - } - return string(l.input[l.inputIdx:upperIdx]) -} - -func (l *tomlLexer) follow(next string) bool { - return next == 
l.peekString(len(next)) -} - -// Error management - -func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn { - l.tokens = append(l.tokens, token{ - Position: Position{l.line, l.col}, - typ: tokenError, - val: fmt.Sprintf(format, args...), - }) - return nil -} - -// State functions - -func (l *tomlLexer) lexVoid() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '[': - return l.lexTableKey - case '#': - return l.lexComment(l.lexVoid) - case '=': - return l.lexEqual - case '\r': - fallthrough - case '\n': - l.skip() - continue - } - - if isSpace(next) { - l.skip() - } - - if l.depth > 0 { - return l.lexRvalue - } - - if isKeyStartChar(next) { - return l.lexKey - } - - if next == eof { - l.next() - break - } - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexRvalue() tomlLexStateFn { - for { - next := l.peek() - switch next { - case '.': - return l.errorf("cannot start float with a dot") - case '=': - return l.lexEqual - case '[': - l.depth++ - return l.lexLeftBracket - case ']': - l.depth-- - return l.lexRightBracket - case '{': - return l.lexLeftCurlyBrace - case '}': - return l.lexRightCurlyBrace - case '#': - return l.lexComment(l.lexRvalue) - case '"': - return l.lexString - case '\'': - return l.lexLiteralString - case ',': - return l.lexComma - case '\r': - fallthrough - case '\n': - l.skip() - if l.depth == 0 { - return l.lexVoid - } - return l.lexRvalue - case '_': - return l.errorf("cannot start number with underscore") - } - - if l.follow("true") { - return l.lexTrue - } - - if l.follow("false") { - return l.lexFalse - } - - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - - if isSpace(next) { - l.skip() - continue - } - - if next == eof { - l.next() - break - } - - possibleDate := l.peekString(35) - dateMatch := dateRegexp.FindString(possibleDate) - if dateMatch != "" { - l.fastForward(len(dateMatch)) - return l.lexDate - } - - if next == '+' || next == '-' || isDigit(next) { - return l.lexNumber - } - - if isAlphanumeric(next) { - return l.lexKey - } - - return l.errorf("no value can start with %c", next) - } - - l.emit(tokenEOF) - return nil -} - -func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenLeftCurlyBrace) - return l.lexRvalue -} - -func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { - l.next() - l.emit(tokenRightCurlyBrace) - return l.lexRvalue -} - -func (l *tomlLexer) lexDate() tomlLexStateFn { - l.emit(tokenDate) - return l.lexRvalue -} - -func (l *tomlLexer) lexTrue() tomlLexStateFn { - l.fastForward(4) - l.emit(tokenTrue) - return l.lexRvalue -} - -func (l *tomlLexer) lexFalse() tomlLexStateFn { - l.fastForward(5) - l.emit(tokenFalse) - return l.lexRvalue -} - -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - -func (l *tomlLexer) lexEqual() tomlLexStateFn { - l.next() - l.emit(tokenEqual) - return l.lexRvalue -} - -func (l *tomlLexer) lexComma() tomlLexStateFn { - l.next() - l.emit(tokenComma) - return l.lexRvalue -} - -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. 
-func (l *tomlLexer) lexKey() tomlLexStateFn { - growingString := "" - - for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() { - if r == '"' { - l.next() - str, err := l.lexStringAsString(`"`, false, true) - if err != nil { - return l.errorf(err.Error()) - } - growingString += str - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - growingString += str - l.next() - continue - } else if r == '\n' { - return l.errorf("keys cannot contain new lines") - } else if isSpace(r) { - break - } else if !isValidBareChar(r) { - return l.errorf("keys cannot contain %c character", r) - } - growingString += string(r) - l.next() - } - l.emitWithValue(tokenKey, growingString) - return l.lexVoid -} - -func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn { - return func() tomlLexStateFn { - for next := l.peek(); next != '\n' && next != eof; next = l.peek() { - if next == '\r' && l.follow("\r\n") { - break - } - l.next() - } - l.ignore() - return previousState - } -} - -func (l *tomlLexer) lexLeftBracket() tomlLexStateFn { - l.next() - l.emit(tokenLeftBracket) - return l.lexRvalue -} - -func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) { - growingString := "" - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - // find end of string - for { - if l.follow(terminator) { - return growingString, nil - } - - next := l.peek() - if next == eof { - break - } - growingString += string(l.next()) - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexLiteralString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := "'" - discardLeadingNewLine := false - if l.follow("''") { - l.skip() - l.skip() - terminator = "'''" - discardLeadingNewLine = true - } - - str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine) - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -// Lex a string and return the results as a string. -// Terminator is the substring indicating the end of the token. -// The resulting string does not include the terminator. 
-func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) { - growingString := "" - - if discardLeadingNewLine { - if l.follow("\r\n") { - l.skip() - l.skip() - } else if l.peek() == '\n' { - l.skip() - } - } - - for { - if l.follow(terminator) { - return growingString, nil - } - - if l.follow("\\") { - l.next() - switch l.peek() { - case '\r': - fallthrough - case '\n': - fallthrough - case '\t': - fallthrough - case ' ': - // skip all whitespace chars following backslash - for strings.ContainsRune("\r\n\t ", l.peek()) { - l.next() - } - case '"': - growingString += "\"" - l.next() - case 'n': - growingString += "\n" - l.next() - case 'b': - growingString += "\b" - l.next() - case 'f': - growingString += "\f" - l.next() - case '/': - growingString += "/" - l.next() - case 't': - growingString += "\t" - l.next() - case 'r': - growingString += "\r" - l.next() - case '\\': - growingString += "\\" - l.next() - case 'u': - l.next() - code := "" - for i := 0; i < 4; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code = code + string(c) - } - intcode, err := strconv.ParseInt(code, 16, 32) - if err != nil { - return "", errors.New("invalid unicode escape: \\u" + code) - } - growingString += string(rune(intcode)) - case 'U': - l.next() - code := "" - for i := 0; i < 8; i++ { - c := l.peek() - if !isHexDigit(c) { - return "", errors.New("unfinished unicode escape") - } - l.next() - code = code + string(c) - } - intcode, err := strconv.ParseInt(code, 16, 64) - if err != nil { - return "", errors.New("invalid unicode escape: \\U" + code) - } - growingString += string(rune(intcode)) - default: - return "", errors.New("invalid escape sequence: \\" + string(l.peek())) - } - } else { - r := l.peek() - - if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) { - return "", fmt.Errorf("unescaped control character %U", r) - } - l.next() - growingString += string(r) - } - - if l.peek() == eof { - break - } - } - - return "", errors.New("unclosed string") -} - -func (l *tomlLexer) lexString() tomlLexStateFn { - l.skip() - - // handle special case for triple-quote - terminator := `"` - discardLeadingNewLine := false - acceptNewLines := false - if l.follow(`""`) { - l.skip() - l.skip() - terminator = `"""` - discardLeadingNewLine = true - acceptNewLines = true - } - - str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines) - - if err != nil { - return l.errorf(err.Error()) - } - - l.emitWithValue(tokenString, str) - l.fastForward(len(terminator)) - l.ignore() - return l.lexRvalue -} - -func (l *tomlLexer) lexTableKey() tomlLexStateFn { - l.next() - - if l.peek() == '[' { - // token '[[' signifies an array of tables - l.next() - l.emit(tokenDoubleLeftBracket) - return l.lexInsideTableArrayKey - } - // vanilla table key - l.emit(tokenLeftBracket) - return l.lexInsideTableKey -} - -// Parse the key till "]]", but only bare keys are supported -func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroupArray) - } - l.next() - if l.peek() != ']' { - break - } - l.next() - l.emit(tokenDoubleRightBracket) - return l.lexVoid - case '[': - return l.errorf("table array key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table array key") -} - -// Parse the key till "]" but only 
bare keys are supported -func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { - for r := l.peek(); r != eof; r = l.peek() { - switch r { - case ']': - if l.currentTokenStop > l.currentTokenStart { - l.emit(tokenKeyGroup) - } - l.next() - l.emit(tokenRightBracket) - return l.lexVoid - case '[': - return l.errorf("table key cannot contain ']'") - default: - l.next() - } - } - return l.errorf("unclosed table key") -} - -func (l *tomlLexer) lexRightBracket() tomlLexStateFn { - l.next() - l.emit(tokenRightBracket) - return l.lexRvalue -} - -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - -func (l *tomlLexer) lexNumber() tomlLexStateFn { - r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - - if r == '+' || r == '-' { - l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } - } - - pointSeen := false - expSeen := false - digitSeen := false - for { - next := l.peek() - if next == '.' 
{ - if pointSeen { - return l.errorf("cannot have two dots in one float") - } - l.next() - if !isDigit(l.peek()) { - return l.errorf("float cannot end with a dot") - } - pointSeen = true - } else if next == 'e' || next == 'E' { - expSeen = true - l.next() - r := l.peek() - if r == '+' || r == '-' { - l.next() - } - } else if isDigit(next) { - digitSeen = true - l.next() - } else if next == '_' { - l.next() - } else { - break - } - if pointSeen && !digitSeen { - return l.errorf("cannot start float with a dot") - } - } - - if !digitSeen { - return l.errorf("no digit in that number") - } - if pointSeen || expSeen { - l.emit(tokenFloat) - } else { - l.emit(tokenInteger) - } - return l.lexRvalue -} - -func (l *tomlLexer) run() { - for state := l.lexVoid; state != nil; { - state = state() - } -} - -func init() { - dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) -} - -// Entry point -func lexToml(inputBytes []byte) []token { - runes := bytes.Runes(inputBytes) - l := &tomlLexer{ - input: runes, - tokens: make([]token, 0, 256), - line: 1, - col: 1, - endbufferLine: 1, - endbufferCol: 1, - } - l.run() - return l.tokens -} diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go deleted file mode 100644 index b5a241505..000000000 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ /dev/null @@ -1,600 +0,0 @@ -package toml - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "time" -) - -type tomlOpts struct { - name string - comment string - commented bool - include bool - omitempty bool -} - -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - -var timeType = reflect.TypeOf(time.Time{}) -var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() - -// Check if the given marshall type maps to a Tree primitive -func isPrimitive(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isPrimitive(mtype.Elem()) - case reflect.Bool: - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Struct: - return mtype == timeType || isCustomMarshaler(mtype) - default: - return false - } -} - -// Check if the given marshall type maps to a Tree slice -func isTreeSlice(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Slice: - return !isOtherSlice(mtype) - default: - return false - } -} - -// Check if the given marshall type maps to a non-Tree slice -func isOtherSlice(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Ptr: - return isOtherSlice(mtype.Elem()) - case reflect.Slice: - return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) - default: - return false - } -} - -// Check if the given marshall type maps to a Tree -func isTree(mtype reflect.Type) bool { - switch mtype.Kind() { - case reflect.Map: - return true - case reflect.Struct: - return !isPrimitive(mtype) - default: - return false - } -} - -func isCustomMarshaler(mtype reflect.Type) bool { - return mtype.Implements(marshalerType) -} - -func callCustomMarshaler(mval reflect.Value) ([]byte, error) { - return mval.Interface().(Marshaler).MarshalTOML() -} - -// Marshaler is the interface implemented by types that -// 
can marshal themselves into valid TOML. -type Marshaler interface { - MarshalTOML() ([]byte, error) -} - -/* -Marshal returns the TOML encoding of v. Behavior is similar to the Go json -encoder, except that there is no concept of a Marshaler interface or MarshalTOML -function for sub-structs, and currently only definite types can be marshaled -(i.e. no `interface{}`). - -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - -Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be -dropped). - -Tree structural types and corresponding marshal types: - - *Tree (*)struct, (*)map[string]interface{} - []*Tree (*)[](*)struct, (*)[](*)map[string]interface{} - []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{}) - interface{} (*)primitive - -Tree primitive types and corresponding marshal types: - - uint64 uint, uint8-uint64, pointers to same - int64 int, int8-uint64, pointers to same - float64 float32, float64, pointers to same - string string, pointers to same - bool bool, pointers to same - time.Time time.Time{}, pointers to same -*/ -func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. -func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. 
-// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3 -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Struct { - return []byte{}, errors.New("Only a struct can be marshaled to TOML") - } - sval := reflect.ValueOf(v) - if isCustomMarshaler(mtype) { - return callCustomMarshaler(sval) - } - t, err := e.valueToTree(mtype, sval) - if err != nil { - return []byte{}, err - } - - var buf bytes.Buffer - _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) - - return buf.Bytes(), err -} - -// Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) - } - tval := newTree() - switch mtype.Kind() { - case reflect.Struct: - for i := 0; i < mtype.NumField(); i++ { - mtypef, mvalf := mtype.Field(i), mval.Field(i) - opts := tomlOptions(mtypef) - if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) - if err != nil { - return nil, err - } - tval.SetWithComment(opts.name, opts.comment, opts.commented, val) - } - } - case reflect.Map: - for _, key := range mval.MapKeys() { - mvalf := mval.MapIndex(key) - val, err := e.valueToToml(mtype.Elem(), mvalf) - if err != nil { - return nil, err - } - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.Set(key.String(), val) - } - } - } - return tval, nil -} - -// Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { - tval := make([]*Tree, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - tval := make([]interface{}, mval.Len(), mval.Len()) - for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) - if err != nil { - return nil, err - } - tval[i] = val - } - return tval, nil -} - -// Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { - if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) - } - switch { - case isCustomMarshaler(mtype): - return callCustomMarshaler(mval) - case isTree(mtype): - return e.valueToTree(mtype, mval) - case isTreeSlice(mtype): - return e.valueToTreeSlice(mtype, mval) - case isOtherSlice(mtype): - return e.valueToOtherSlice(mtype, mval) - default: - switch mtype.Kind() { - case reflect.Bool: - return mval.Bool(), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return mval.Int(), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return mval.Uint(), nil - case reflect.Float32, reflect.Float64: - return mval.Float(), nil - case reflect.String: - return mval.String(), nil - case reflect.Struct: - return 
mval.Interface().(time.Time), nil - default: - return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) - } - } -} - -// Unmarshal attempts to unmarshal the Tree into a Go struct pointed by v. -// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for -// sub-structs, and only definite types can be unmarshaled. -func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t} - return d.unmarshal(v) -} - -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err -} - -// Unmarshal parses the TOML-encoded data and stores the result in the value -// pointed to by v. Behavior is similar to the Go json encoder, except that there -// is no concept of an Unmarshaler interface or UnmarshalTOML function for -// sub-structs, and currently only definite types can be unmarshaled to (i.e. no -// `interface{}`). -// -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// -// See Marshal() documentation for types mapping table. -func Unmarshal(data []byte, v interface{}) error { - t, err := LoadReader(bytes.NewReader(data)) - if err != nil { - return err - } - return t.Unmarshal(v) -} - -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. 
-func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") - } - - sval, err := d.valueFromTree(mtype.Elem(), d.tval) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - -// Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) - } - var mval reflect.Value - switch mtype.Kind() { - case reflect.Struct: - mval = reflect.New(mtype).Elem() - for i := 0; i < mtype.NumField(); i++ { - mtypef := mtype.Field(i) - opts := tomlOptions(mtypef) - if opts.include { - baseKey := opts.name - keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)} - for _, key := range keysToTry { - exists := tval.Has(key) - if !exists { - continue - } - val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) - if err != nil { - return mval, formatError(err, tval.GetPosition(key)) - } - mval.Field(i).Set(mvalf) - break - } - } - } - case reflect.Map: - mval = reflect.MakeMap(mtype) - for _, key := range tval.Keys() { - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) - if err != nil { - return mval, formatError(err, tval.GetPosition(key)) - } - mval.SetMapIndex(reflect.ValueOf(key), mvalf) - } - } - return mval, nil -} - -// Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) - for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { - mval := reflect.MakeSlice(mtype, len(tval), len(tval)) - for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) - if err != nil { - return mval, err - } - mval.Index(i).Set(val) - } - return mval, nil -} - -// Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) - } - - switch tval.(type) { - case *Tree: - if isTree(mtype) { - return d.valueFromTree(mtype, tval.(*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSlice(mtype) { - return d.valueFromTreeSlice(mtype, tval.([]*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - if isOtherSlice(mtype) { - return d.valueFromOtherSlice(mtype, tval.([]interface{})) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) - default: - switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - // if this passes for when mtype is 
reflect.Struct, tval is a time.Time - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. - if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if val.Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) - } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) - } - - return val.Convert(mtype), nil - default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) - } - } -} - -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) - if err != nil { - return reflect.ValueOf(nil), err - } - mval := reflect.New(mtype.Elem()) - mval.Elem().Set(val) - return mval, nil -} - -func tomlOptions(vf reflect.StructField) tomlOpts { - tag := vf.Tag.Get("toml") - parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get("comment"); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) - result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false} - if parse[0] != "" { - if parse[0] == "-" && len(parse) == 1 { - result.include = false - } else { - result.name = strings.Trim(parse[0], " ") - } - } - if vf.PkgPath != "" { - result.include = false - } - if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" { - result.omitempty = true - } - if vf.Type.Kind() == reflect.Ptr { - result.omitempty = true - } - return result -} - -func isZero(val reflect.Value) bool { - switch val.Type().Kind() { - case reflect.Map: - fallthrough - case reflect.Array: - fallthrough - case reflect.Slice: - return val.Len() == 0 - default: - return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface()) - } -} - -func formatError(err error, pos 
Position) error { - if err.Error()[0] == '(' { // Error already contains position information - return err - } - return fmt.Errorf("%s: %s", pos, err) -} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go deleted file mode 100644 index 2d27599a9..000000000 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ /dev/null @@ -1,430 +0,0 @@ -// TOML Parser. - -package toml - -import ( - "errors" - "fmt" - "math" - "reflect" - "regexp" - "strconv" - "strings" - "time" -) - -type tomlParser struct { - flowIdx int - flow []token - tree *Tree - currentTable []string - seenTableKeys []string -} - -type tomlParserStateFn func() tomlParserStateFn - -// Formats and panics an error message based on a token -func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) { - panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...)) -} - -func (p *tomlParser) run() { - for state := p.parseStart; state != nil; { - state = state() - } -} - -func (p *tomlParser) peek() *token { - if p.flowIdx >= len(p.flow) { - return nil - } - return &p.flow[p.flowIdx] -} - -func (p *tomlParser) assume(typ tokenType) { - tok := p.getToken() - if tok == nil { - p.raiseError(tok, "was expecting token %s, but token stream is empty", tok) - } - if tok.typ != typ { - p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok) - } -} - -func (p *tomlParser) getToken() *token { - tok := p.peek() - if tok == nil { - return nil - } - p.flowIdx++ - return tok -} - -func (p *tomlParser) parseStart() tomlParserStateFn { - tok := p.peek() - - // end of stream, parsing is finished - if tok == nil { - return nil - } - - switch tok.typ { - case tokenDoubleLeftBracket: - return p.parseGroupArray - case tokenLeftBracket: - return p.parseGroup - case tokenKey: - return p.parseAssign - case tokenEOF: - return nil - default: - p.raiseError(tok, "unexpected token") - } - return nil -} - -func (p *tomlParser) parseGroupArray() tomlParserStateFn { - startToken := p.getToken() // discard the [[ - key := p.getToken() - if key.typ != tokenKeyGroupArray { - p.raiseError(key, "unexpected token %s, was expecting a table array key", key) - } - - // get or create table array element at the indicated part in the path - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries - destTree := p.tree.GetPath(keys) - var array []*Tree - if destTree == nil { - array = make([]*Tree, 0) - } else if target, ok := destTree.([]*Tree); ok && target != nil { - array = destTree.([]*Tree) - } else { - p.raiseError(key, "key %s is already assigned and not of type table array", key) - } - p.currentTable = keys - - // add a new tree to the end of the table array - newTree := newTree() - newTree.position = startToken.Position - array = append(array, newTree) - p.tree.SetPath(p.currentTable, array) - - // remove all keys that were children of this table array - prefix := key.val + "." - found := false - for ii := 0; ii < len(p.seenTableKeys); { - tableKey := p.seenTableKeys[ii] - if strings.HasPrefix(tableKey, prefix) { - p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...) 
- } else { - found = (tableKey == key.val) - ii++ - } - } - - // keep this key name from use by other kinds of assignments - if !found { - p.seenTableKeys = append(p.seenTableKeys, key.val) - } - - // move to next parser state - p.assume(tokenDoubleRightBracket) - return p.parseStart -} - -func (p *tomlParser) parseGroup() tomlParserStateFn { - startToken := p.getToken() // discard the [ - key := p.getToken() - if key.typ != tokenKeyGroup { - p.raiseError(key, "unexpected token %s, was expecting a table key", key) - } - for _, item := range p.seenTableKeys { - if item == key.val { - p.raiseError(key, "duplicated tables") - } - } - - p.seenTableKeys = append(p.seenTableKeys, key.val) - keys, err := parseKey(key.val) - if err != nil { - p.raiseError(key, "invalid table array key: %s", err) - } - if err := p.tree.createSubTree(keys, startToken.Position); err != nil { - p.raiseError(key, "%s", err) - } - p.assume(tokenRightBracket) - p.currentTable = keys - return p.parseStart -} - -func (p *tomlParser) parseAssign() tomlParserStateFn { - key := p.getToken() - p.assume(tokenEqual) - - value := p.parseRvalue() - var tableKey []string - if len(p.currentTable) > 0 { - tableKey = p.currentTable - } else { - tableKey = []string{} - } - - // find the table to assign, looking out for arrays of tables - var targetNode *Tree - switch node := p.tree.GetPath(tableKey).(type) { - case []*Tree: - targetNode = node[len(node)-1] - case *Tree: - targetNode = node - default: - p.raiseError(key, "Unknown table type for path: %s", - strings.Join(tableKey, ".")) - } - - // assign value to the found table - keyVals := []string{key.val} - if len(keyVals) != 1 { - p.raiseError(key, "Invalid key") - } - keyVal := keyVals[0] - localKey := []string{keyVal} - finalKey := append(tableKey, keyVal) - if targetNode.GetPath(localKey) != nil { - p.raiseError(key, "The following key was defined twice: %s", - strings.Join(finalKey, ".")) - } - var toInsert interface{} - - switch value.(type) { - case *Tree, []*Tree: - toInsert = value - default: - toInsert = &tomlValue{value: value, position: key.Position} - } - targetNode.values[keyVal] = toInsert - return p.parseStart -} - -var numberUnderscoreInvalidRegexp *regexp.Regexp -var hexNumberUnderscoreInvalidRegexp *regexp.Regexp - -func numberContainsInvalidUnderscore(value string) error { - if numberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in number") - } - return nil -} - -func hexNumberContainsInvalidUnderscore(value string) error { - if hexNumberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in hex number") - } - return nil -} - -func cleanupNumberToken(value string) string { - cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal -} - -func (p *tomlParser) parseRvalue() interface{} { - tok := p.getToken() - if tok == nil || tok.typ == tokenEOF { - p.raiseError(tok, "expecting a value") - } - - switch tok.typ { - case tokenString: - return tok.val - case tokenTrue: - return true - case tokenFalse: - return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() - case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) - case 
'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) - case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) - default: - panic("invalid base") // the lexer should catch this first - } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) - } - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - cleanedVal := cleanupNumberToken(tok.val) - val, err := strconv.ParseFloat(cleanedVal, 64) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenDate: - val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC) - if err != nil { - p.raiseError(tok, "%s", err) - } - return val - case tokenLeftBracket: - return p.parseArray() - case tokenLeftCurlyBrace: - return p.parseInlineTable() - case tokenEqual: - p.raiseError(tok, "cannot have multiple equals for the same key") - case tokenError: - p.raiseError(tok, "%s", tok) - } - - p.raiseError(tok, "never reached") - - return nil -} - -func tokenIsComma(t *token) bool { - return t != nil && t.typ == tokenComma -} - -func (p *tomlParser) parseInlineTable() *Tree { - tree := newTree() - var previous *token -Loop: - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated inline table") - } - switch follow.typ { - case tokenRightCurlyBrace: - p.getToken() - break Loop - case tokenKey: - if !tokenIsComma(previous) && previous != nil { - p.raiseError(follow, "comma expected between fields in inline table") - } - key := p.getToken() - p.assume(tokenEqual) - value := p.parseRvalue() - tree.Set(key.val, value) - case tokenComma: - if previous == nil { - p.raiseError(follow, "inline table cannot start with a comma") - } - if tokenIsComma(previous) { - p.raiseError(follow, "need field between two commas in inline table") - } - p.getToken() - default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) - } - previous = follow - } - if tokenIsComma(previous) { - p.raiseError(previous, "trailing comma at the end of inline table") - } - return tree -} - -func (p *tomlParser) parseArray() interface{} { - var array []interface{} - arrayType := reflect.TypeOf(nil) - for { - follow := p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ == tokenRightBracket { - p.getToken() - break - } - val := p.parseRvalue() - if arrayType == nil { - arrayType = reflect.TypeOf(val) - } - if reflect.TypeOf(val) != arrayType { - p.raiseError(follow, "mixed types in array") - } - array = append(array, val) - follow = p.peek() - if follow == nil || follow.typ == tokenEOF { - p.raiseError(follow, "unterminated array") - } - if follow.typ != tokenRightBracket && follow.typ != tokenComma { - p.raiseError(follow, "missing comma") - } - if follow.typ == tokenComma { - p.getToken() - } - } - // An array of Trees is actually an array of inline - // tables, which is a shorthand for a table array. If the - // array was not converted from []interface{} to []*Tree, - // the two notations would not be equivalent. 
- if arrayType == reflect.TypeOf(newTree()) { - tomlArray := make([]*Tree, len(array)) - for i, v := range array { - tomlArray[i] = v.(*Tree) - } - return tomlArray - } - return array -} - -func parseToml(flow []token) *Tree { - result := newTree() - result.position = Position{1, 1} - parser := &tomlParser{ - flowIdx: 0, - flow: flow, - tree: result, - currentTable: make([]string, 0), - seenTableKeys: make([]string, 0), - } - parser.run() - return result -} - -func init() { - numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) - hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) -} diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go deleted file mode 100644 index c17bff87b..000000000 --- a/vendor/github.com/pelletier/go-toml/position.go +++ /dev/null @@ -1,29 +0,0 @@ -// Position support for go-toml - -package toml - -import ( - "fmt" -) - -// Position of a document element within a TOML document. -// -// Line and Col are both 1-indexed positions for the element's line number and -// column number, respectively. Values of zero or less will cause Invalid(), -// to return true. -type Position struct { - Line int // line within the document - Col int // column within the line -} - -// String representation of the position. -// Displays 1-indexed line and column numbers. -func (p Position) String() string { - return fmt.Sprintf("(%d, %d)", p.Line, p.Col) -} - -// Invalid returns whether or not the position is valid (i.e. with negative or -// null values) -func (p Position) Invalid() bool { - return p.Line <= 0 || p.Col <= 0 -} diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go deleted file mode 100644 index 1a9081346..000000000 --- a/vendor/github.com/pelletier/go-toml/token.go +++ /dev/null @@ -1,144 +0,0 @@ -package toml - -import ( - "fmt" - "strconv" - "unicode" -) - -// Define tokens -type tokenType int - -const ( - eof = -(iota + 1) -) - -const ( - tokenError tokenType = iota - tokenEOF - tokenComment - tokenKey - tokenString - tokenInteger - tokenTrue - tokenFalse - tokenFloat - tokenInf - tokenNan - tokenEqual - tokenLeftBracket - tokenRightBracket - tokenLeftCurlyBrace - tokenRightCurlyBrace - tokenLeftParen - tokenRightParen - tokenDoubleLeftBracket - tokenDoubleRightBracket - tokenDate - tokenKeyGroup - tokenKeyGroupArray - tokenComma - tokenColon - tokenDollar - tokenStar - tokenQuestion - tokenDot - tokenDotDot - tokenEOL -) - -var tokenTypeNames = []string{ - "Error", - "EOF", - "Comment", - "Key", - "String", - "Integer", - "True", - "False", - "Float", - "Inf", - "NaN", - "=", - "[", - "]", - "{", - "}", - "(", - ")", - "]]", - "[[", - "Date", - "KeyGroup", - "KeyGroupArray", - ",", - ":", - "$", - "*", - "?", - ".", - "..", - "EOL", -} - -type token struct { - Position - typ tokenType - val string -} - -func (tt tokenType) String() string { - idx := int(tt) - if idx < len(tokenTypeNames) { - return tokenTypeNames[idx] - } - return "Unknown" -} - -func (t token) Int() int { - if result, err := strconv.Atoi(t.val); err != nil { - panic(err) - } else { - return result - } -} - -func (t token) String() string { - switch t.typ { - case tokenEOF: - return "EOF" - case tokenError: - return t.val - } - - return fmt.Sprintf("%q", t.val) -} - -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -func isAlphanumeric(r rune) bool { - return unicode.IsLetter(r) || r == '_' -} - -func isKeyChar(r rune) 
bool { - // Keys start with the first character that isn't whitespace or [ and end - // with the last non-whitespace character before the equals sign. Keys - // cannot contain a # character." - return !(r == '\r' || r == '\n' || r == eof || r == '=') -} - -func isKeyStartChar(r rune) bool { - return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[') -} - -func isDigit(r rune) bool { - return unicode.IsNumber(r) -} - -func isHexDigit(r rune) bool { - return isDigit(r) || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go deleted file mode 100644 index 05493a444..000000000 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ /dev/null @@ -1,309 +0,0 @@ -package toml - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" -) - -type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - position Position -} - -// Tree is the result of the parsing of a TOML file. -type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - position Position -} - -func newTree() *Tree { - return &Tree{ - values: make(map[string]interface{}), - position: Position{}, - } -} - -// TreeFromMap initializes a new Tree object using the given map. -func TreeFromMap(m map[string]interface{}) (*Tree, error) { - result, err := toTree(m) - if err != nil { - return nil, err - } - return result.(*Tree), nil -} - -// Position returns the position of the tree. -func (t *Tree) Position() Position { - return t.position -} - -// Has returns a boolean indicating if the given key exists. -func (t *Tree) Has(key string) bool { - if key == "" { - return false - } - return t.HasPath(strings.Split(key, ".")) -} - -// HasPath returns true if the given path of keys exists, false otherwise. -func (t *Tree) HasPath(keys []string) bool { - return t.GetPath(keys) != nil -} - -// Keys returns the keys of the toplevel tree (does not recurse). -func (t *Tree) Keys() []string { - keys := make([]string, len(t.values)) - i := 0 - for k := range t.values { - keys[i] = k - i++ - } - return keys -} - -// Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. -// Returns nil if the path does not exist in the tree. -// If keys is of length zero, the current tree is returned. -func (t *Tree) Get(key string) interface{} { - if key == "" { - return t - } - return t.GetPath(strings.Split(key, ".")) -} - -// GetPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPath(keys []string) interface{} { - if len(keys) == 0 { - return t - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return nil - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return nil - } - subtree = node[len(node)-1] - default: - return nil // cannot navigate through other node types - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.value - default: - return node - } -} - -// GetPosition returns the position of the given key. 
-func (t *Tree) GetPosition(key string) Position { - if key == "" { - return t.position - } - return t.GetPositionPath(strings.Split(key, ".")) -} - -// GetPositionPath returns the element in the tree indicated by 'keys'. -// If keys is of length zero, the current tree is returned. -func (t *Tree) GetPositionPath(keys []string) Position { - if len(keys) == 0 { - return t.position - } - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - value, exists := subtree.values[intermediateKey] - if !exists { - return Position{0, 0} - } - switch node := value.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - subtree = node[len(node)-1] - default: - return Position{0, 0} - } - } - // branch based on final node type - switch node := subtree.values[keys[len(keys)-1]].(type) { - case *tomlValue: - return node.position - case *Tree: - return node.position - case []*Tree: - // go to most recent element - if len(node) == 0 { - return Position{0, 0} - } - return node[len(node)-1].position - default: - return Position{0, 0} - } -} - -// GetDefault works like Get but with a default value -func (t *Tree) GetDefault(key string, def interface{}) interface{} { - val := t.Get(key) - if val == nil { - return def - } - return val -} - -// Set an element in the tree. -// Key is a dot-separated path (e.g. a.b.c). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) -} - -// SetPath sets an element in the tree. -// Keys is an array of path elements (e.g. {"a","b","c"}). -// Creates all necessary intermediate trees, if needed. -func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch value.(type) { - case *Tree: - tt := value.(*Tree) - tt.comment = comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: comment, commented: commented} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - -// createSubTree takes a tree and a key and create the necessary intermediate -// subtrees to create a subtree at that point. In-place. -// -// e.g. 
passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b] -// and tree[a][b][c] -// -// Returns nil on success, error object on failure -func (t *Tree) createSubTree(keys []string, pos Position) error { - subtree := t - for _, intermediateKey := range keys { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - tree := newTree() - tree.position = pos - subtree.values[intermediateKey] = tree - nextTree = tree - } - - switch node := nextTree.(type) { - case []*Tree: - subtree = node[len(node)-1] - case *Tree: - subtree = node - default: - return fmt.Errorf("unknown type for path %s (%s): %T (%#v)", - strings.Join(keys, "."), intermediateKey, nextTree, nextTree) - } - } - return nil -} - -// LoadBytes creates a Tree from a []byte. -func LoadBytes(b []byte) (tree *Tree, err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = errors.New(r.(string)) - } - }() - tree = parseToml(lexToml(b)) - return -} - -// LoadReader creates a Tree from any io.Reader. -func LoadReader(reader io.Reader) (tree *Tree, err error) { - inputBytes, err := ioutil.ReadAll(reader) - if err != nil { - return - } - tree, err = LoadBytes(inputBytes) - return -} - -// Load creates a Tree from a string. -func Load(content string) (tree *Tree, err error) { - return LoadBytes([]byte(content)) -} - -// LoadFile creates a Tree from a file. -func LoadFile(path string) (tree *Tree, err error) { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - return LoadReader(file) -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go deleted file mode 100644 index 79610e9b3..000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ /dev/null @@ -1,142 +0,0 @@ -package toml - -import ( - "fmt" - "reflect" - "time" -) - -var kindToType = [reflect.String + 1]reflect.Type{ - reflect.Bool: reflect.TypeOf(true), - reflect.String: reflect.TypeOf(""), - reflect.Float32: reflect.TypeOf(float64(1)), - reflect.Float64: reflect.TypeOf(float64(1)), - reflect.Int: reflect.TypeOf(int64(1)), - reflect.Int8: reflect.TypeOf(int64(1)), - reflect.Int16: reflect.TypeOf(int64(1)), - reflect.Int32: reflect.TypeOf(int64(1)), - reflect.Int64: reflect.TypeOf(int64(1)), - reflect.Uint: reflect.TypeOf(uint64(1)), - reflect.Uint8: reflect.TypeOf(uint64(1)), - reflect.Uint16: reflect.TypeOf(uint64(1)), - reflect.Uint32: reflect.TypeOf(uint64(1)), - reflect.Uint64: reflect.TypeOf(uint64(1)), -} - -// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found. 
-// supported values: -// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32 -func typeFor(k reflect.Kind) reflect.Type { - if k > 0 && int(k) < len(kindToType) { - return kindToType[k] - } - return nil -} - -func simpleValueCoercion(object interface{}) (interface{}, error) { - switch original := object.(type) { - case string, bool, int64, uint64, float64, time.Time: - return original, nil - case int: - return int64(original), nil - case int8: - return int64(original), nil - case int16: - return int64(original), nil - case int32: - return int64(original), nil - case uint: - return uint64(original), nil - case uint8: - return uint64(original), nil - case uint16: - return uint64(original), nil - case uint32: - return uint64(original), nil - case float32: - return float64(original), nil - case fmt.Stringer: - return original.String(), nil - default: - return nil, fmt.Errorf("cannot convert type %T to Tree", object) - } -} - -func sliceToTree(object interface{}) (interface{}, error) { - // arrays are a bit tricky, since they can represent either a - // collection of simple values, which is represented by one - // *tomlValue, or an array of tables, which is represented by an - // array of *Tree. - - // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice - value := reflect.ValueOf(object) - insideType := value.Type().Elem() - length := value.Len() - if length > 0 { - insideType = reflect.ValueOf(value.Index(0).Interface()).Type() - } - if insideType.Kind() == reflect.Map { - // this is considered as an array of tables - tablesArray := make([]*Tree, 0, length) - for i := 0; i < length; i++ { - table := value.Index(i) - tree, err := toTree(table.Interface()) - if err != nil { - return nil, err - } - tablesArray = append(tablesArray, tree.(*Tree)) - } - return tablesArray, nil - } - - sliceType := typeFor(insideType.Kind()) - if sliceType == nil { - sliceType = insideType - } - - arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length) - - for i := 0; i < length; i++ { - val := value.Index(i).Interface() - simpleValue, err := simpleValueCoercion(val) - if err != nil { - return nil, err - } - arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) - } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil -} - -func toTree(object interface{}) (interface{}, error) { - value := reflect.ValueOf(object) - - if value.Kind() == reflect.Map { - values := map[string]interface{}{} - keys := value.MapKeys() - for _, key := range keys { - if key.Kind() != reflect.String { - if _, ok := key.Interface().(string); !ok { - return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind()) - } - } - - v := value.MapIndex(key) - newValue, err := toTree(v.Interface()) - if err != nil { - return nil, err - } - values[key.String()] = newValue - } - return &Tree{values: values, position: Position{}}, nil - } - - if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { - return sliceToTree(object) - } - - simpleValue, err := simpleValueCoercion(object) - if err != nil { - return nil, err - } - return &tomlValue{value: simpleValue, position: Position{}}, nil -} diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go deleted file mode 100644 index d322a9764..000000000 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ /dev/null @@ -1,289 +0,0 
@@ -package toml - -import ( - "bytes" - "fmt" - "io" - "math" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -// encodes a string to a TOML-compliant string value -func encodeTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString(`\t`) - case '\n': - b.WriteString(`\n`) - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString(`\r`) - case '"': - b.WriteString(`\"`) - case '\\': - b.WriteString(`\\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { - switch value := v.(type) { - case uint64: - return strconv.FormatUint(value, 10), nil - case int64: - return strconv.FormatInt(value, 10), nil - case float64: - // Ensure a round float does contain a decimal point. Otherwise feeding - // the output back to the parser would convert to an integer. - if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil - } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil - case string: - return "\"" + encodeTomlString(value) + "\"", nil - case []byte: - b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) - case bool: - if value { - return "true", nil - } - return "false", nil - case time.Time: - return value.Format(time.RFC3339), nil - case nil: - return "", nil - } - - rv := reflect.ValueOf(v) - - if rv.Kind() == reflect.Slice { - var values []string - for i := 0; i < rv.Len(); i++ { - item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) - if err != nil { - return "", err - } - values = append(values, itemRepr) - } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for i, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(value) - if i != len(values)-1 { - stringBuffer.WriteString(`,`) - } - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + "]") - - return stringBuffer.String(), nil - } - return "[" + strings.Join(values, ",") + "]", nil - } - return "", fmt.Errorf("unsupported value type %T: %v", v, v) -} - -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { - simpleValuesKeys := make([]string, 0) - complexValuesKeys := make([]string, 0) - - for k := range t.values { - v := t.values[k] - switch v.(type) { - case *Tree, []*Tree: - complexValuesKeys = append(complexValuesKeys, k) - default: - simpleValuesKeys = append(simpleValuesKeys, k) - } - } - - sort.Strings(simpleValuesKeys) - sort.Strings(complexValuesKeys) - - for _, k := range simpleValuesKeys { - v, ok := t.values[k].(*tomlValue) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - - repr, err := tomlValueStringRepresentation(v.value, indent, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - 
writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - } - - for _, k := range complexValuesKeys { - v := t.values[k] - - combinedKey := k - if keyspace != "" { - combinedKey = keyspace + "." + combinedKey - } - var commented string - if t.commented { - commented = "# " - } - - switch node := v.(type) { - // node has to be of those two types given how keys are sorted above - case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - case []*Tree: - for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") - bytesCount += int64(writtenBytesCount) - if err != nil { - return bytesCount, err - } - - bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) - if err != nil { - return bytesCount, err - } - } - } - } - - return bytesCount, nil -} - -func writeStrings(w io.Writer, s ...string) (int, error) { - var n int - for i := range s { - b, err := io.WriteString(w, s[i]) - n += b - if err != nil { - return n, err - } - } - return n, nil -} - -// WriteTo encode the Tree as Toml and writes it to the writer w. -// Returns the number of bytes written in case of success, or an error if anything happened. -func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) -} - -// ToTomlString generates a human-readable representation of the current tree. -// Output spans multiple lines, and is suitable for ingest by a TOML parser. -// If the conversion cannot be performed, ToString returns a non-nil error. -func (t *Tree) ToTomlString() (string, error) { - var buf bytes.Buffer - _, err := t.WriteTo(&buf) - if err != nil { - return "", err - } - return buf.String(), nil -} - -// String generates a human-readable representation of the current tree. -// Alias of ToString. Present to implement the fmt.Stringer interface. -func (t *Tree) String() string { - result, _ := t.ToTomlString() - return result -} - -// ToMap recursively generates a representation of the tree using Go built-in structures. 
-// The following types are used: -// -// * bool -// * float64 -// * int64 -// * string -// * uint64 -// * time.Time -// * map[string]interface{} (where interface{} is any of this list) -// * []interface{} (where interface{} is any of this list) -func (t *Tree) ToMap() map[string]interface{} { - result := map[string]interface{}{} - - for k, v := range t.values { - switch node := v.(type) { - case []*Tree: - var array []interface{} - for _, item := range node { - array = append(array, item.ToMap()) - } - result[k] = array - case *Tree: - result[k] = node.ToMap() - case *tomlValue: - result[k] = node.value - } - } - return result -} diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt deleted file mode 100644 index 298f0e266..000000000 --- a/vendor/github.com/spf13/afero/LICENSE.txt +++ /dev/null @@ -1,174 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go deleted file mode 100644 index f5b5e127c..000000000 --- a/vendor/github.com/spf13/afero/afero.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package afero provides types and methods for interacting with the filesystem, -// as an abstraction layer. - -// Afero also provides a few implementations that are mostly interoperable. One that -// uses the operating system filesystem, one that uses memory to store files -// (cross platform) and an interface that should be implemented if you want to -// provide your own filesystem. - -package afero - -import ( - "errors" - "io" - "os" - "time" -) - -type Afero struct { - Fs -} - -// File represents a file in the filesystem. -type File interface { - io.Closer - io.Reader - io.ReaderAt - io.Seeker - io.Writer - io.WriterAt - - Name() string - Readdir(count int) ([]os.FileInfo, error) - Readdirnames(n int) ([]string, error) - Stat() (os.FileInfo, error) - Sync() error - Truncate(size int64) error - WriteString(s string) (ret int, err error) -} - -// Fs is the filesystem interface. -// -// Any simulated or real filesystem should implement this interface. -type Fs interface { - // Create creates a file in the filesystem, returning the file and an - // error, if any happens. - Create(name string) (File, error) - - // Mkdir creates a directory in the filesystem, return an error if any - // happens. - Mkdir(name string, perm os.FileMode) error - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(path string, perm os.FileMode) error - - // Open opens a file, returning it or an error, if any happens. - Open(name string) (File, error) - - // OpenFile opens a file using the given flags and the given mode. - OpenFile(name string, flag int, perm os.FileMode) (File, error) - - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(name string) error - - // RemoveAll removes a directory path and any children it contains. It - // does not fail if the path does not exist (return nil). - RemoveAll(path string) error - - // Rename renames a file. - Rename(oldname, newname string) error - - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(name string) (os.FileInfo, error) - - // The name of this FileSystem - Name() string - - //Chmod changes the mode of the named file to mode. 
- Chmod(name string, mode os.FileMode) error - - //Chtimes changes the access and modification times of the named file - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go deleted file mode 100644 index 616ff8ff7..000000000 --- a/vendor/github.com/spf13/afero/basepath.go +++ /dev/null @@ -1,180 +0,0 @@ -package afero - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "time" -) - -var _ Lstater = (*BasePathFs)(nil) - -// The BasePathFs restricts all operations to a given path within an Fs. -// The given file name to the operations on this Fs will be prepended with -// the base path before calling the base Fs. -// Any file name (after filepath.Clean()) outside this base path will be -// treated as non existing file. -// -// Note that it does not clean the error messages on return, so you may -// reveal the real path on errors. -type BasePathFs struct { - source Fs - path string -} - -type BasePathFile struct { - File - path string -} - -func (f *BasePathFile) Name() string { - sourcename := f.File.Name() - return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) -} - -func NewBasePathFs(source Fs, path string) Fs { - return &BasePathFs{source: source, path: path} -} - -// on a file outside the base path it returns the given file name and an error, -// else the given file with the base path prepended -func (b *BasePathFs) RealPath(name string) (path string, err error) { - if err := validateBasePathName(name); err != nil { - return name, err - } - - bpath := filepath.Clean(b.path) - path = filepath.Clean(filepath.Join(bpath, name)) - if !strings.HasPrefix(path, bpath) { - return name, os.ErrNotExist - } - - return path, nil -} - -func validateBasePathName(name string) error { - if runtime.GOOS != "windows" { - // Not much to do here; - // the virtual file paths all look absolute on *nix. - return nil - } - - // On Windows a common mistake would be to provide an absolute OS path - // We could strip out the base part, but that would not be very portable. 
- if filepath.IsAbs(name) { - return os.ErrNotExist - } - - return nil -} - -func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chtimes", Path: name, Err: err} - } - return b.source.Chtimes(name, atime, mtime) -} - -func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "chmod", Path: name, Err: err} - } - return b.source.Chmod(name, mode) -} - -func (b *BasePathFs) Name() string { - return "BasePathFs" -} - -func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "stat", Path: name, Err: err} - } - return b.source.Stat(name) -} - -func (b *BasePathFs) Rename(oldname, newname string) (err error) { - if oldname, err = b.RealPath(oldname); err != nil { - return &os.PathError{Op: "rename", Path: oldname, Err: err} - } - if newname, err = b.RealPath(newname); err != nil { - return &os.PathError{Op: "rename", Path: newname, Err: err} - } - return b.source.Rename(oldname, newname) -} - -func (b *BasePathFs) RemoveAll(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove_all", Path: name, Err: err} - } - return b.source.RemoveAll(name) -} - -func (b *BasePathFs) Remove(name string) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - return b.source.Remove(name) -} - -func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "openfile", Path: name, Err: err} - } - sourcef, err := b.source.OpenFile(name, flag, mode) - if err != nil { - return nil, err - } - return &BasePathFile{sourcef, b.path}, nil -} - -func (b *BasePathFs) Open(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} - } - sourcef, err := b.source.Open(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.Mkdir(name, mode) -} - -func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { - if name, err = b.RealPath(name); err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - return b.source.MkdirAll(name, mode) -} - -func (b *BasePathFs) Create(name string) (f File, err error) { - if name, err = b.RealPath(name); err != nil { - return nil, &os.PathError{Op: "create", Path: name, Err: err} - } - sourcef, err := b.source.Create(name) - if err != nil { - return nil, err - } - return &BasePathFile{File: sourcef, path: b.path}, nil -} - -func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - name, err := b.RealPath(name) - if err != nil { - return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} - } - if lstater, ok := b.source.(Lstater); ok { - return lstater.LstatIfPossible(name) - } - fi, err := b.source.Stat(name) - return fi, false, err -} - -// vim: ts=4 sw=4 noexpandtab nolist syn=go diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go 
deleted file mode 100644 index 29a26c67d..000000000 --- a/vendor/github.com/spf13/afero/cacheOnReadFs.go +++ /dev/null @@ -1,290 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -// If the cache duration is 0, cache time will be unlimited, i.e. once -// a file is in the layer, the base will never be read again for this file. -// -// For cache times greater than 0, the modification time of a file is -// checked. Note that a lot of file system implementations only allow a -// resolution of a second for timestamps... or as the godoc for os.Chtimes() -// states: "The underlying filesystem may truncate or round the values to a -// less precise time unit." -// -// This caching union will forward all write calls also to the base file -// system first. To prevent writing to the base Fs, wrap it in a read-only -// filter - Note: this will also make the overlay read-only, for writing files -// in the overlay, use the overlay Fs directly, not via the union Fs. -type CacheOnReadFs struct { - base Fs - layer Fs - cacheTime time.Duration -} - -func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { - return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} -} - -type cacheState int - -const ( - // not present in the overlay, unknown if it exists in the base: - cacheMiss cacheState = iota - // present in the overlay and in base, base file is newer: - cacheStale - // present in the overlay - with cache time == 0 it may exist in the base, - // with cacheTime > 0 it exists in the base and is same age or newer in the - // overlay - cacheHit - // happens if someone writes directly to the overlay without - // going through this union - cacheLocal -) - -func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { - var lfi, bfi os.FileInfo - lfi, err = u.layer.Stat(name) - if err == nil { - if u.cacheTime == 0 { - return cacheHit, lfi, nil - } - if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { - bfi, err = u.base.Stat(name) - if err != nil { - return cacheLocal, lfi, nil - } - if bfi.ModTime().After(lfi.ModTime()) { - return cacheStale, bfi, nil - } - } - return cacheHit, lfi, nil - } - - if err == syscall.ENOENT || os.IsNotExist(err) { - return cacheMiss, nil, nil - } - - return cacheMiss, nil, err -} - -func (u *CacheOnReadFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chtimes(name, atime, mtime) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chtimes(name, atime, mtime) - } - if err != nil { - return err - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Chmod(name, mode) - case cacheStale, cacheMiss: - if err := u.copyToLayer(name); err != nil { - return err - } - err = u.base.Chmod(name, mode) - } - if err != nil { - return err - } - return u.layer.Chmod(name, mode) -} - -func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheMiss: - return u.base.Stat(name) - default: // cacheStale has base, 
cacheHit and cacheLocal the layer os.FileInfo - return fi, nil - } -} - -func (u *CacheOnReadFs) Rename(oldname, newname string) error { - st, _, err := u.cacheStatus(oldname) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit: - err = u.base.Rename(oldname, newname) - case cacheStale, cacheMiss: - if err := u.copyToLayer(oldname); err != nil { - return err - } - err = u.base.Rename(oldname, newname) - } - if err != nil { - return err - } - return u.layer.Rename(oldname, newname) -} - -func (u *CacheOnReadFs) Remove(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.Remove(name) - } - if err != nil { - return err - } - return u.layer.Remove(name) -} - -func (u *CacheOnReadFs) RemoveAll(name string) error { - st, _, err := u.cacheStatus(name) - if err != nil { - return err - } - switch st { - case cacheLocal: - case cacheHit, cacheStale, cacheMiss: - err = u.base.RemoveAll(name) - } - if err != nil { - return err - } - return u.layer.RemoveAll(name) -} - -func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - st, _, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - switch st { - case cacheLocal, cacheHit: - default: - if err := u.copyToLayer(name); err != nil { - return nil, err - } - } - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - bfi, err := u.base.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - lfi, err := u.layer.OpenFile(name, flag, perm) - if err != nil { - bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? - return nil, err - } - return &UnionFile{Base: bfi, Layer: lfi}, nil - } - return u.layer.OpenFile(name, flag, perm) -} - -func (u *CacheOnReadFs) Open(name string) (File, error) { - st, fi, err := u.cacheStatus(name) - if err != nil { - return nil, err - } - - switch st { - case cacheLocal: - return u.layer.Open(name) - - case cacheMiss: - bfi, err := u.base.Stat(name) - if err != nil { - return nil, err - } - if bfi.IsDir() { - return u.base.Open(name) - } - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - - case cacheStale: - if !fi.IsDir() { - if err := u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.Open(name) - } - case cacheHit: - if !fi.IsDir() { - return u.layer.Open(name) - } - } - // the dirs from cacheHit, cacheStale fall down here: - bfile, _ := u.base.Open(name) - lfile, err := u.layer.Open(name) - if err != nil && bfile == nil { - return nil, err - } - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { - err := u.base.Mkdir(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache -} - -func (u *CacheOnReadFs) Name() string { - return "CacheOnReadFs" -} - -func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { - err := u.base.MkdirAll(name, perm) - if err != nil { - return err - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CacheOnReadFs) Create(name string) (File, error) { - bfh, err := u.base.Create(name) - if err != nil { - return nil, err - } - lfh, err := u.layer.Create(name) - if err != nil { - // oops, see comment about OS_TRUNC above, should we remove? 
then we have to - // remember if the file did not exist before - bfh.Close() - return nil, err - } - return &UnionFile{Base: bfh, Layer: lfh}, nil -} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go deleted file mode 100644 index 5728243d9..000000000 --- a/vendor/github.com/spf13/afero/const_bsds.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build darwin openbsd freebsd netbsd dragonfly - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go deleted file mode 100644 index 968fc2783..000000000 --- a/vendor/github.com/spf13/afero/const_win_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// +build !darwin -// +build !openbsd -// +build !freebsd -// +build !dragonfly -// +build !netbsd - -package afero - -import ( - "syscall" -) - -const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go deleted file mode 100644 index 9aef39794..000000000 --- a/vendor/github.com/spf13/afero/copyOnWriteFs.go +++ /dev/null @@ -1,292 +0,0 @@ -package afero - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" -) - -var _ Lstater = (*CopyOnWriteFs)(nil) - -// The CopyOnWriteFs is a union filesystem: a read only base file system with -// a possibly writeable layer on top. Changes to the file system will only -// be made in the overlay: Changing an existing file in the base layer which -// is not present in the overlay will copy the file to the overlay ("changing" -// includes also calls to e.g. Chtimes() and Chmod()). -// -// Reading directories is currently only supported via Open(), not OpenFile(). 
-type CopyOnWriteFs struct { - base Fs - layer Fs -} - -func NewCopyOnWriteFs(base Fs, layer Fs) Fs { - return &CopyOnWriteFs{base: base, layer: layer} -} - -// Returns true if the file is not in the overlay -func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { - if _, err := u.layer.Stat(name); err == nil { - return false, nil - } - _, err := u.base.Stat(name) - if err != nil { - if oerr, ok := err.(*os.PathError); ok { - if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { - return false, nil - } - } - if err == syscall.ENOENT { - return false, nil - } - } - return true, err -} - -func (u *CopyOnWriteFs) copyToLayer(name string) error { - return copyToLayer(u.base, u.layer, name) -} - -func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chtimes(name, atime, mtime) -} - -func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { - b, err := u.isBaseFile(name) - if err != nil { - return err - } - if b { - if err := u.copyToLayer(name); err != nil { - return err - } - } - return u.layer.Chmod(name, mode) -} - -func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { - fi, err := u.layer.Stat(name) - if err != nil { - isNotExist := u.isNotExist(err) - if isNotExist { - return u.base.Stat(name) - } - return nil, err - } - return fi, nil -} - -func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - llayer, ok1 := u.layer.(Lstater) - lbase, ok2 := u.base.(Lstater) - - if ok1 { - fi, b, err := llayer.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - - if !u.isNotExist(err) { - return nil, b, err - } - } - - if ok2 { - fi, b, err := lbase.LstatIfPossible(name) - if err == nil { - return fi, b, nil - } - if !u.isNotExist(err) { - return nil, b, err - } - } - - fi, err := u.Stat(name) - - return fi, false, err -} - -func (u *CopyOnWriteFs) isNotExist(err error) bool { - if e, ok := err.(*os.PathError); ok { - err = e.Err - } - if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { - return true - } - return false -} - -// Renaming files present only in the base layer is not permitted -func (u *CopyOnWriteFs) Rename(oldname, newname string) error { - b, err := u.isBaseFile(oldname) - if err != nil { - return err - } - if b { - return syscall.EPERM - } - return u.layer.Rename(oldname, newname) -} - -// Removing files present only in the base layer is not permitted. If -// a file is present in the base layer and the overlay, only the overlay -// will be removed. 
-func (u *CopyOnWriteFs) Remove(name string) error { - err := u.layer.Remove(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) RemoveAll(name string) error { - err := u.layer.RemoveAll(name) - switch err { - case syscall.ENOENT: - _, err = u.base.Stat(name) - if err == nil { - return syscall.EPERM - } - return syscall.ENOENT - default: - return err - } -} - -func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - if b { - if err = u.copyToLayer(name); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - dir := filepath.Dir(name) - isaDir, err := IsDir(u.base, dir) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - if isaDir { - if err = u.layer.MkdirAll(dir, 0777); err != nil { - return nil, err - } - return u.layer.OpenFile(name, flag, perm) - } - - isaDir, err = IsDir(u.layer, dir) - if err != nil { - return nil, err - } - if isaDir { - return u.layer.OpenFile(name, flag, perm) - } - - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? - } - if b { - return u.base.OpenFile(name, flag, perm) - } - return u.layer.OpenFile(name, flag, perm) -} - -// This function handles the 9 different possibilities caused -// by the union which are the intersection of the following... -// layer: doesn't exist, exists as a file, and exists as a directory -// base: doesn't exist, exists as a file, and exists as a directory -func (u *CopyOnWriteFs) Open(name string) (File, error) { - // Since the overlay overrides the base we check that first - b, err := u.isBaseFile(name) - if err != nil { - return nil, err - } - - // If overlay doesn't exist, return the base (base state irrelevant) - if b { - return u.base.Open(name) - } - - // If overlay is a file, return it (base state irrelevant) - dir, err := IsDir(u.layer, name) - if err != nil { - return nil, err - } - if !dir { - return u.layer.Open(name) - } - - // Overlay is a directory, base state now matters. - // Base state has 3 states to check but 2 outcomes: - // A. It's a file or non-readable in the base (return just the overlay) - // B. It's an accessible directory in the base (return a UnionFile) - - // If base is file or nonreadable, return overlay - dir, err = IsDir(u.base, name) - if !dir || err != nil { - return u.layer.Open(name) - } - - // Both base & layer are directories - // Return union file (if opens are without error) - bfile, bErr := u.base.Open(name) - lfile, lErr := u.layer.Open(name) - - // If either have errors at this point something is very wrong. 
Return nil and the errors - if bErr != nil || lErr != nil { - return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) - } - - return &UnionFile{Base: bfile, Layer: lfile}, nil -} - -func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return syscall.EEXIST - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Name() string { - return "CopyOnWriteFs" -} - -func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { - dir, err := IsDir(u.base, name) - if err != nil { - return u.layer.MkdirAll(name, perm) - } - if dir { - return syscall.EEXIST - } - return u.layer.MkdirAll(name, perm) -} - -func (u *CopyOnWriteFs) Create(name string) (File, error) { - return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666) -} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go deleted file mode 100644 index c42193688..000000000 --- a/vendor/github.com/spf13/afero/httpFs.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "errors" - "net/http" - "os" - "path" - "path/filepath" - "strings" - "time" -) - -type httpDir struct { - basePath string - fs HttpFs -} - -func (d httpDir) Open(name string) (http.File, error) { - if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || - strings.Contains(name, "\x00") { - return nil, errors.New("http: invalid character in file path") - } - dir := string(d.basePath) - if dir == "" { - dir = "." 
- } - - f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) - if err != nil { - return nil, err - } - return f, nil -} - -type HttpFs struct { - source Fs -} - -func NewHttpFs(source Fs) *HttpFs { - return &HttpFs{source: source} -} - -func (h HttpFs) Dir(s string) *httpDir { - return &httpDir{basePath: s, fs: h} -} - -func (h HttpFs) Name() string { return "h HttpFs" } - -func (h HttpFs) Create(name string) (File, error) { - return h.source.Create(name) -} - -func (h HttpFs) Chmod(name string, mode os.FileMode) error { - return h.source.Chmod(name, mode) -} - -func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return h.source.Chtimes(name, atime, mtime) -} - -func (h HttpFs) Mkdir(name string, perm os.FileMode) error { - return h.source.Mkdir(name, perm) -} - -func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { - return h.source.MkdirAll(path, perm) -} - -func (h HttpFs) Open(name string) (http.File, error) { - f, err := h.source.Open(name) - if err == nil { - if httpfile, ok := f.(http.File); ok { - return httpfile, nil - } - } - return nil, err -} - -func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return h.source.OpenFile(name, flag, perm) -} - -func (h HttpFs) Remove(name string) error { - return h.source.Remove(name) -} - -func (h HttpFs) RemoveAll(path string) error { - return h.source.RemoveAll(path) -} - -func (h HttpFs) Rename(oldname, newname string) error { - return h.source.Rename(oldname, newname) -} - -func (h HttpFs) Stat(name string) (os.FileInfo, error) { - return h.source.Stat(name) -} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go deleted file mode 100644 index 5c3a3d8ff..000000000 --- a/vendor/github.com/spf13/afero/ioutil.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "sync" - "time" -) - -// byName implements sort.Interface. -type byName []os.FileInfo - -func (f byName) Len() int { return len(f) } -func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } -func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } - -// ReadDir reads the directory named by dirname and returns -// a list of sorted directory entries. -func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { - return ReadDir(a.Fs, dirname) -} - -func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - list, err := f.Readdir(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Sort(byName(list)) - return list, nil -} - -// ReadFile reads the file named by filename and returns the contents. -// A successful call returns err == nil, not err == EOF. 
Because ReadFile -// reads the whole file, it does not treat an EOF from Read as an error -// to be reported. -func (a Afero) ReadFile(filename string) ([]byte, error) { - return ReadFile(a.Fs, filename) -} - -func ReadFile(fs Fs, filename string) ([]byte, error) { - f, err := fs.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - // It's a good but not certain bet that FileInfo will tell us exactly how much to - // read, so let's try it but be prepared for the answer to be wrong. - var n int64 - - if fi, err := f.Stat(); err == nil { - // Don't preallocate a huge buffer, just in case. - if size := fi.Size(); size < 1e9 { - n = size - } - } - // As initial capacity for readAll, use n + a little extra in case Size is zero, - // and to avoid another allocation after Read has filled the buffer. The readAll - // call will read into its allocated internal buffer cheaply. If the size was - // wrong, we'll either waste some space off the end or reallocate as needed, but - // in the overwhelmingly common case we'll get it just right. - return readAll(f, n+bytes.MinRead) -} - -// readAll reads from r until an error or EOF and returns the data it read -// from the internal buffer allocated with a specified capacity. -func readAll(r io.Reader, capacity int64) (b []byte, err error) { - buf := bytes.NewBuffer(make([]byte, 0, capacity)) - // If the buffer overflows, we will get bytes.ErrTooLarge. - // Return that as an error. Any other panic remains. - defer func() { - e := recover() - if e == nil { - return - } - if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { - err = panicErr - } else { - panic(e) - } - }() - _, err = buf.ReadFrom(r) - return buf.Bytes(), err -} - -// ReadAll reads from r until an error or EOF and returns the data it read. -// A successful call returns err == nil, not err == EOF. Because ReadAll is -// defined to read from src until EOF, it does not treat an EOF from Read -// as an error to be reported. -func ReadAll(r io.Reader) ([]byte, error) { - return readAll(r, bytes.MinRead) -} - -// WriteFile writes data to a file named by filename. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { - return WriteFile(a.Fs, filename, data, perm) -} - -func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *File. 
-// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func (a Afero) TempFile(dir, prefix string) (f File, err error) { - return TempFile(a.Fs, dir, prefix) -} - -func TempFile(fs Fs, dir, prefix string) (f File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func (a Afero) TempDir(dir, prefix string) (name string, err error) { - return TempDir(a.Fs, dir, prefix) -} -func TempDir(fs Fs, dir, prefix string) (name string, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.Mkdir(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if err == nil { - name = try - } - break - } - return -} diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go deleted file mode 100644 index 89c1bfc0a..000000000 --- a/vendor/github.com/spf13/afero/lstater.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright © 2018 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" -) - -// Lstater is an optional interface in Afero. It is only implemented by the -// filesystems saying so. -// It will call Lstat if the filesystem iself is, or it delegates to, the os filesystem. -// Else it will call Stat. -// In addtion to the FileInfo, it will return a boolean telling whether Lstat was called or not. -type Lstater interface { - LstatIfPossible(name string) (os.FileInfo, bool, error) -} diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go deleted file mode 100644 index c18a87fb7..000000000 --- a/vendor/github.com/spf13/afero/match.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2009 The Go Authors. All rights reserved. 
- -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "path/filepath" - "sort" - "strings" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// This was adapted from (http://golang.org/pkg/path/filepath) and uses several -// built-ins from that package. -func Glob(fs Fs, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - // Lstat not supported by a ll filesystems. - if _, err = lstatIfPossible(fs, pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - switch dir { - case "": - dir = "." - case string(filepath.Separator): - // nothing - default: - dir = dir[0 : len(dir)-1] // chop off trailing separator - } - - if !hasMeta(dir) { - return glob(fs, dir, file, nil) - } - - var m []string - m, err = Glob(fs, dir) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - if !fi.IsDir() { - return - } - d, err := fs.Open(dir) - if err != nil { - return - } - defer d.Close() - - names, _ := d.Readdirnames(-1) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? - return strings.IndexAny(path, "*?[") >= 0 -} diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go deleted file mode 100644 index e104013f4..000000000 --- a/vendor/github.com/spf13/afero/mem/dir.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -type Dir interface { - Len() int - Names() []string - Files() []*FileData - Add(*FileData) - Remove(*FileData) -} - -func RemoveFromMemDir(dir *FileData, f *FileData) { - dir.memDir.Remove(f) -} - -func AddToMemDir(dir *FileData, f *FileData) { - dir.memDir.Add(f) -} - -func InitializeDir(d *FileData) { - if d.memDir == nil { - d.dir = true - d.memDir = &DirMap{} - } -} diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go deleted file mode 100644 index 03a57ee5b..000000000 --- a/vendor/github.com/spf13/afero/mem/dirmap.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright © 2015 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package mem - -import "sort" - -type DirMap map[string]*FileData - -func (m DirMap) Len() int { return len(m) } -func (m DirMap) Add(f *FileData) { m[f.name] = f } -func (m DirMap) Remove(f *FileData) { delete(m, f.name) } -func (m DirMap) Files() (files []*FileData) { - for _, f := range m { - files = append(files, f) - } - sort.Sort(filesSorter(files)) - return files -} - -// implement sort.Interface for []*FileData -type filesSorter []*FileData - -func (s filesSorter) Len() int { return len(s) } -func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } - -func (m DirMap) Names() (names []string) { - for x := range m { - names = append(names, x) - } - return names -} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go deleted file mode 100644 index 885e55429..000000000 --- a/vendor/github.com/spf13/afero/mem/file.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright © 2015 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package mem - -import ( - "bytes" - "errors" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" -) - -import "time" - -const FilePathSeparator = string(filepath.Separator) - -type File struct { - // atomic requires 64-bit alignment for struct field access - at int64 - readDirCount int64 - closed bool - readOnly bool - fileData *FileData -} - -func NewFileHandle(data *FileData) *File { - return &File{fileData: data} -} - -func NewReadOnlyFileHandle(data *FileData) *File { - return &File{fileData: data, readOnly: true} -} - -func (f File) Data() *FileData { - return f.fileData -} - -type FileData struct { - sync.Mutex - name string - data []byte - memDir Dir - dir bool - mode os.FileMode - modtime time.Time -} - -func (d *FileData) Name() string { - d.Lock() - defer d.Unlock() - return d.name -} - -func CreateFile(name string) *FileData { - return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} -} - -func CreateDir(name string) *FileData { - return &FileData{name: name, memDir: &DirMap{}, dir: true} -} - -func ChangeFileName(f *FileData, newname string) { - f.Lock() - f.name = newname - f.Unlock() -} - -func SetMode(f *FileData, mode os.FileMode) { - f.Lock() - f.mode = mode - f.Unlock() -} - -func SetModTime(f *FileData, mtime time.Time) { - f.Lock() - setModTime(f, mtime) - f.Unlock() -} - -func setModTime(f *FileData, mtime time.Time) { - f.modtime = mtime -} - -func GetFileInfo(f *FileData) *FileInfo { - return &FileInfo{f} -} - -func (f *File) Open() error { - atomic.StoreInt64(&f.at, 0) - atomic.StoreInt64(&f.readDirCount, 0) - f.fileData.Lock() - f.closed = false - f.fileData.Unlock() - return nil -} - -func (f *File) Close() error { - f.fileData.Lock() - f.closed = true - if !f.readOnly { - setModTime(f.fileData, time.Now()) - } - f.fileData.Unlock() - return nil -} - -func (f *File) Name() string { - return f.fileData.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return &FileInfo{f.fileData}, nil -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - var outLength int64 - - f.fileData.Lock() - files := f.fileData.memDir.Files()[f.readDirCount:] - if count > 0 { - if len(files) < count { - outLength = int64(len(files)) - } else { - outLength = int64(count) - } - if len(files) == 0 { - err = io.EOF - } - } else { - outLength = int64(len(files)) - } - f.readDirCount += outLength - f.fileData.Unlock() - - res = make([]os.FileInfo, outLength) - for i := range res { - res[i] = &FileInfo{files[i]} - } - - return res, err -} - -func (f *File) Readdirnames(n int) (names []string, err error) { - fi, err := f.Readdir(n) - names = make([]string, len(fi)) - for i, f := range fi { - _, names[i] = filepath.Split(f.Name()) - } - return names, err -} - -func (f *File) Read(b []byte) (n int, err error) { - f.fileData.Lock() - defer f.fileData.Unlock() - if f.closed == true { - return 0, ErrFileClosed - } - if len(b) > 0 && int(f.at) == len(f.fileData.data) { - return 0, io.EOF - } - if int(f.at) > len(f.fileData.data) { - return 0, io.ErrUnexpectedEOF - } - if len(f.fileData.data)-int(f.at) >= len(b) { - n = len(b) - } else { - n = len(f.fileData.data) - int(f.at) - } - copy(b, f.fileData.data[f.at:f.at+int64(n)]) - atomic.AddInt64(&f.at, int64(n)) - return -} - -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Read(b) -} - -func (f *File) Truncate(size int64) error { - if f.closed == true { - return ErrFileClosed - } - if f.readOnly 
{ - return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - if size < 0 { - return ErrOutOfRange - } - if size > int64(len(f.fileData.data)) { - diff := size - int64(len(f.fileData.data)) - f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) - } else { - f.fileData.data = f.fileData.data[0:size] - } - setModTime(f.fileData, time.Now()) - return nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - if f.closed == true { - return 0, ErrFileClosed - } - switch whence { - case 0: - atomic.StoreInt64(&f.at, offset) - case 1: - atomic.AddInt64(&f.at, int64(offset)) - case 2: - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) - } - return f.at, nil -} - -func (f *File) Write(b []byte) (n int, err error) { - if f.readOnly { - return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} - } - n = len(b) - cur := atomic.LoadInt64(&f.at) - f.fileData.Lock() - defer f.fileData.Unlock() - diff := cur - int64(len(f.fileData.data)) - var tail []byte - if n+int(cur) < len(f.fileData.data) { - tail = f.fileData.data[n+int(cur):] - } - if diff > 0 { - f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) - f.fileData.data = append(f.fileData.data, tail...) - } else { - f.fileData.data = append(f.fileData.data[:cur], b...) - f.fileData.data = append(f.fileData.data, tail...) - } - setModTime(f.fileData, time.Now()) - - atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) - return -} - -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - atomic.StoreInt64(&f.at, off) - return f.Write(b) -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.Write([]byte(s)) -} - -func (f *File) Info() *FileInfo { - return &FileInfo{f.fileData} -} - -type FileInfo struct { - *FileData -} - -// Implements os.FileInfo -func (s *FileInfo) Name() string { - s.Lock() - _, name := filepath.Split(s.name) - s.Unlock() - return name -} -func (s *FileInfo) Mode() os.FileMode { - s.Lock() - defer s.Unlock() - return s.mode -} -func (s *FileInfo) ModTime() time.Time { - s.Lock() - defer s.Unlock() - return s.modtime -} -func (s *FileInfo) IsDir() bool { - s.Lock() - defer s.Unlock() - return s.dir -} -func (s *FileInfo) Sys() interface{} { return nil } -func (s *FileInfo) Size() int64 { - if s.IsDir() { - return int64(42) - } - s.Lock() - defer s.Unlock() - return int64(len(s.data)) -} - -var ( - ErrFileClosed = errors.New("File is closed") - ErrOutOfRange = errors.New("Out of range") - ErrTooLarge = errors.New("Too large") - ErrFileNotFound = os.ErrNotExist - ErrFileExists = os.ErrExist - ErrDestinationExists = os.ErrExist -) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go deleted file mode 100644 index 09498e70f..000000000 --- a/vendor/github.com/spf13/afero/memmap.go +++ /dev/null @@ -1,365 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/spf13/afero/mem" -) - -type MemMapFs struct { - mu sync.RWMutex - data map[string]*mem.FileData - init sync.Once -} - -func NewMemMapFs() Fs { - return &MemMapFs{} -} - -func (m *MemMapFs) getData() map[string]*mem.FileData { - m.init.Do(func() { - m.data = make(map[string]*mem.FileData) - // Root should always exist, right? - // TODO: what about windows? - m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator) - }) - return m.data -} - -func (*MemMapFs) Name() string { return "MemMapFS" } - -func (m *MemMapFs) Create(name string) (File, error) { - name = normalizePath(name) - m.mu.Lock() - file := mem.CreateFile(name) - m.getData()[name] = file - m.registerWithParent(file) - m.mu.Unlock() - return mem.NewFileHandle(file), nil -} - -func (m *MemMapFs) unRegisterWithParent(fileName string) error { - f, err := m.lockfreeOpen(fileName) - if err != nil { - return err - } - parent := m.findParent(f) - if parent == nil { - log.Panic("parent of ", f.Name(), " is nil") - } - - parent.Lock() - mem.RemoveFromMemDir(parent, f) - parent.Unlock() - return nil -} - -func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData { - pdir, _ := filepath.Split(f.Name()) - pdir = filepath.Clean(pdir) - pfile, err := m.lockfreeOpen(pdir) - if err != nil { - return nil - } - return pfile -} - -func (m *MemMapFs) registerWithParent(f *mem.FileData) { - if f == nil { - return - } - parent := m.findParent(f) - if parent == nil { - pdir := filepath.Dir(filepath.Clean(f.Name())) - err := m.lockfreeMkdir(pdir, 0777) - if err != nil { - //log.Println("Mkdir error:", err) - return - } - parent, err = m.lockfreeOpen(pdir) - if err != nil { - //log.Println("Open after Mkdir error:", err) - return - } - } - - parent.Lock() - mem.InitializeDir(parent) - mem.AddToMemDir(parent, f) - parent.Unlock() -} - -func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - x, ok := m.getData()[name] - if ok { - // Only return ErrFileExists if it's a file, not a directory. 
- i := mem.FileInfo{FileData: x} - if !i.IsDir() { - return ErrFileExists - } - } else { - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - } - return nil -} - -func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - _, ok := m.getData()[name] - m.mu.RUnlock() - if ok { - return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} - } - - m.mu.Lock() - item := mem.CreateDir(name) - m.getData()[name] = item - m.registerWithParent(item) - m.mu.Unlock() - - m.Chmod(name, perm|os.ModeDir) - - return nil -} - -func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { - err := m.Mkdir(path, perm) - if err != nil { - if err.(*os.PathError).Err == ErrFileExists { - return nil - } - return err - } - return nil -} - -// Handle some relative paths -func normalizePath(path string) string { - path = filepath.Clean(path) - - switch path { - case ".": - return FilePathSeparator - case "..": - return FilePathSeparator - default: - return path - } -} - -func (m *MemMapFs) Open(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewReadOnlyFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) openWrite(name string) (File, error) { - f, err := m.open(name) - if f != nil { - return mem.NewFileHandle(f), err - } - return nil, err -} - -func (m *MemMapFs) open(name string) (*mem.FileData, error) { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} - } - return f, nil -} - -func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { - name = normalizePath(name) - f, ok := m.getData()[name] - if ok { - return f, nil - } else { - return nil, ErrFileNotFound - } -} - -func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - chmod := false - file, err := m.openWrite(name) - if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { - file, err = m.Create(name) - chmod = true - } - if err != nil { - return nil, err - } - if flag == os.O_RDONLY { - file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) - } - if flag&os.O_APPEND > 0 { - _, err = file.Seek(0, os.SEEK_END) - if err != nil { - file.Close() - return nil, err - } - } - if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { - err = file.Truncate(0) - if err != nil { - file.Close() - return nil, err - } - } - if chmod { - m.Chmod(name, perm) - } - return file, nil -} - -func (m *MemMapFs) Remove(name string) error { - name = normalizePath(name) - - m.mu.Lock() - defer m.mu.Unlock() - - if _, ok := m.getData()[name]; ok { - err := m.unRegisterWithParent(name) - if err != nil { - return &os.PathError{Op: "remove", Path: name, Err: err} - } - delete(m.getData(), name) - } else { - return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} - } - return nil -} - -func (m *MemMapFs) RemoveAll(path string) error { - path = normalizePath(path) - m.mu.Lock() - m.unRegisterWithParent(path) - m.mu.Unlock() - - m.mu.RLock() - defer m.mu.RUnlock() - - for p, _ := range m.getData() { - if strings.HasPrefix(p, path) { - m.mu.RUnlock() - m.mu.Lock() - delete(m.getData(), p) - m.mu.Unlock() - m.mu.RLock() - } - } - return nil -} - -func (m *MemMapFs) Rename(oldname, newname string) error { - oldname = normalizePath(oldname) - newname = normalizePath(newname) - - if oldname == newname { - return nil - } - - m.mu.RLock() - defer m.mu.RUnlock() - if _, ok := 
m.getData()[oldname]; ok { - m.mu.RUnlock() - m.mu.Lock() - m.unRegisterWithParent(oldname) - fileData := m.getData()[oldname] - delete(m.getData(), oldname) - mem.ChangeFileName(fileData, newname) - m.getData()[newname] = fileData - m.registerWithParent(fileData) - m.mu.Unlock() - m.mu.RLock() - } else { - return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} - } - return nil -} - -func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { - f, err := m.Open(name) - if err != nil { - return nil, err - } - fi := mem.GetFileInfo(f.(*mem.File).Data()) - return fi, nil -} - -func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetMode(f, mode) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - name = normalizePath(name) - - m.mu.RLock() - f, ok := m.getData()[name] - m.mu.RUnlock() - if !ok { - return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} - } - - m.mu.Lock() - mem.SetModTime(f, mtime) - m.mu.Unlock() - - return nil -} - -func (m *MemMapFs) List() { - for _, x := range m.data { - y := mem.FileInfo{FileData: x} - fmt.Println(x.Name(), y.Size()) - } -} - -// func debugMemMapList(fs Fs) { -// if x, ok := fs.(*MemMapFs); ok { -// x.List() -// } -// } diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go deleted file mode 100644 index 13cc1b84c..000000000 --- a/vendor/github.com/spf13/afero/os.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright © 2014 Steve Francia . -// Copyright 2013 tsuru authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" -) - -var _ Lstater = (*OsFs)(nil) - -// OsFs is a Fs implementation that uses functions provided by the os package. -// -// For details in any method, check the documentation of the os package -// (http://golang.org/pkg/os/). 
-type OsFs struct{} - -func NewOsFs() Fs { - return &OsFs{} -} - -func (OsFs) Name() string { return "OsFs" } - -func (OsFs) Create(name string) (File, error) { - f, e := os.Create(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Mkdir(name string, perm os.FileMode) error { - return os.Mkdir(name, perm) -} - -func (OsFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (OsFs) Open(name string) (File, error) { - f, e := os.Open(name) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - f, e := os.OpenFile(name, flag, perm) - if f == nil { - // while this looks strange, we need to return a bare nil (of type nil) not - // a nil value of type *os.File or nil won't be nil - return nil, e - } - return f, e -} - -func (OsFs) Remove(name string) error { - return os.Remove(name) -} - -func (OsFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -func (OsFs) Rename(oldname, newname string) error { - return os.Rename(oldname, newname) -} - -func (OsFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -func (OsFs) Chmod(name string, mode os.FileMode) error { - return os.Chmod(name, mode) -} - -func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - fi, err := os.Lstat(name) - return fi, true, err -} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go deleted file mode 100644 index 18f60a0f6..000000000 --- a/vendor/github.com/spf13/afero/path.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright ©2015 The Go Authors -// Copyright ©2015 Steve Francia -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "path/filepath" - "sort" -) - -// readDirNames reads the directory named by dirname and returns -// a sorted list of directory entries. 
-// adapted from https://golang.org/src/path/filepath/path.go -func readDirNames(fs Fs, dirname string) ([]string, error) { - f, err := fs.Open(dirname) - if err != nil { - return nil, err - } - names, err := f.Readdirnames(-1) - f.Close() - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// walk recursively descends path, calling walkFn -// adapted from https://golang.org/src/path/filepath/path.go -func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { - err := walkFn(path, info, nil) - if err != nil { - if info.IsDir() && err == filepath.SkipDir { - return nil - } - return err - } - - if !info.IsDir() { - return nil - } - - names, err := readDirNames(fs, path) - if err != nil { - return walkFn(path, info, err) - } - - for _, name := range names { - filename := filepath.Join(path, name) - fileInfo, err := lstatIfPossible(fs, filename) - if err != nil { - if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { - return err - } - } else { - err = walk(fs, filename, fileInfo, walkFn) - if err != nil { - if !fileInfo.IsDir() || err != filepath.SkipDir { - return err - } - } - } - } - return nil -} - -// if the filesystem supports it, use Lstat, else use fs.Stat -func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { - if lfs, ok := fs.(Lstater); ok { - fi, _, err := lfs.LstatIfPossible(path) - return fi, err - } - return fs.Stat(path) -} - -// Walk walks the file tree rooted at root, calling walkFn for each file or -// directory in the tree, including root. All errors that arise visiting files -// and directories are filtered by walkFn. The files are walked in lexical -// order, which makes the output deterministic but means that for very -// large directories Walk can be inefficient. -// Walk does not follow symbolic links. 
- -func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { - return Walk(a.Fs, root, walkFn) -} - -func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { - info, err := lstatIfPossible(fs, root) - if err != nil { - return walkFn(root, nil, err) - } - return walk(fs, root, info, walkFn) -} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go deleted file mode 100644 index c6376ec37..000000000 --- a/vendor/github.com/spf13/afero/readonlyfs.go +++ /dev/null @@ -1,80 +0,0 @@ -package afero - -import ( - "os" - "syscall" - "time" -) - -var _ Lstater = (*ReadOnlyFs)(nil) - -type ReadOnlyFs struct { - source Fs -} - -func NewReadOnlyFs(source Fs) Fs { - return &ReadOnlyFs{source: source} -} - -func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { - return ReadDir(r.source, name) -} - -func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Name() string { - return "ReadOnlyFilter" -} - -func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { - return r.source.Stat(name) -} - -func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { - if lsf, ok := r.source.(Lstater); ok { - return lsf.LstatIfPossible(name) - } - fi, err := r.Stat(name) - return fi, false, err -} - -func (r *ReadOnlyFs) Rename(o, n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) RemoveAll(p string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Remove(n string) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { - return nil, syscall.EPERM - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *ReadOnlyFs) Open(n string) (File, error) { - return r.source.Open(n) -} - -func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { - return syscall.EPERM -} - -func (r *ReadOnlyFs) Create(n string) (File, error) { - return nil, syscall.EPERM -} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go deleted file mode 100644 index 9d92dbc05..000000000 --- a/vendor/github.com/spf13/afero/regexpfs.go +++ /dev/null @@ -1,214 +0,0 @@ -package afero - -import ( - "os" - "regexp" - "syscall" - "time" -) - -// The RegexpFs filters files (not directories) by regular expression. Only -// files matching the given regexp will be allowed, all others get a ENOENT error ( -// "No such file or directory"). 
-// -type RegexpFs struct { - re *regexp.Regexp - source Fs -} - -func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { - return &RegexpFs{source: source, re: re} -} - -type RegexpFile struct { - f File - re *regexp.Regexp -} - -func (r *RegexpFs) matchesName(name string) error { - if r.re == nil { - return nil - } - if r.re.MatchString(name) { - return nil - } - return syscall.ENOENT -} - -func (r *RegexpFs) dirOrMatches(name string) error { - dir, err := IsDir(r.source, name) - if err != nil { - return err - } - if dir { - return nil - } - return r.matchesName(name) -} - -func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chtimes(name, a, m) -} - -func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Chmod(name, mode) -} - -func (r *RegexpFs) Name() string { - return "RegexpFs" -} - -func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.Stat(name) -} - -func (r *RegexpFs) Rename(oldname, newname string) error { - dir, err := IsDir(r.source, oldname) - if err != nil { - return err - } - if dir { - return nil - } - if err := r.matchesName(oldname); err != nil { - return err - } - if err := r.matchesName(newname); err != nil { - return err - } - return r.source.Rename(oldname, newname) -} - -func (r *RegexpFs) RemoveAll(p string) error { - dir, err := IsDir(r.source, p) - if err != nil { - return err - } - if !dir { - if err := r.matchesName(p); err != nil { - return err - } - } - return r.source.RemoveAll(p) -} - -func (r *RegexpFs) Remove(name string) error { - if err := r.dirOrMatches(name); err != nil { - return err - } - return r.source.Remove(name) -} - -func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - if err := r.dirOrMatches(name); err != nil { - return nil, err - } - return r.source.OpenFile(name, flag, perm) -} - -func (r *RegexpFs) Open(name string) (File, error) { - dir, err := IsDir(r.source, name) - if err != nil { - return nil, err - } - if !dir { - if err := r.matchesName(name); err != nil { - return nil, err - } - } - f, err := r.source.Open(name) - return &RegexpFile{f: f, re: r.re}, nil -} - -func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { - return r.source.Mkdir(n, p) -} - -func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { - return r.source.MkdirAll(n, p) -} - -func (r *RegexpFs) Create(name string) (File, error) { - if err := r.matchesName(name); err != nil { - return nil, err - } - return r.source.Create(name) -} - -func (f *RegexpFile) Close() error { - return f.f.Close() -} - -func (f *RegexpFile) Read(s []byte) (int, error) { - return f.f.Read(s) -} - -func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { - return f.f.ReadAt(s, o) -} - -func (f *RegexpFile) Seek(o int64, w int) (int64, error) { - return f.f.Seek(o, w) -} - -func (f *RegexpFile) Write(s []byte) (int, error) { - return f.f.Write(s) -} - -func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { - return f.f.WriteAt(s, o) -} - -func (f *RegexpFile) Name() string { - return f.f.Name() -} - -func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { - var rfi []os.FileInfo - rfi, err = f.f.Readdir(c) - if err != nil { - return nil, err - } - for _, i := range rfi { - if i.IsDir() || f.re.MatchString(i.Name()) { - fi = append(fi, i) - } - } - return fi, 
nil -} - -func (f *RegexpFile) Readdirnames(c int) (n []string, err error) { - fi, err := f.Readdir(c) - if err != nil { - return nil, err - } - for _, s := range fi { - n = append(n, s.Name()) - } - return n, nil -} - -func (f *RegexpFile) Stat() (os.FileInfo, error) { - return f.f.Stat() -} - -func (f *RegexpFile) Sync() error { - return f.f.Sync() -} - -func (f *RegexpFile) Truncate(s int64) error { - return f.f.Truncate(s) -} - -func (f *RegexpFile) WriteString(s string) (int, error) { - return f.f.WriteString(s) -} diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go deleted file mode 100644 index 1e78f7d1e..000000000 --- a/vendor/github.com/spf13/afero/unionFile.go +++ /dev/null @@ -1,305 +0,0 @@ -package afero - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -// The UnionFile implements the afero.File interface and will be returned -// when reading a directory present at least in the overlay or opening a file -// for writing. -// -// The calls to -// Readdir() and Readdirnames() merge the file os.FileInfo / names from the -// base and the overlay - for files present in both layers, only those -// from the overlay will be used. -// -// When opening files for writing (Create() / OpenFile() with the right flags) -// the operations will be done in both layers, starting with the overlay. A -// successful read in the overlay will move the cursor position in the base layer -// by the number of bytes read. -type UnionFile struct { - Base File - Layer File - Merger DirsMerger - off int - files []os.FileInfo -} - -func (f *UnionFile) Close() error { - // first close base, so we have a newer timestamp in the overlay. If we'd close - // the overlay first, we'd get a cacheStale the next time we access this file - // -> cache would be useless ;-) - if f.Base != nil { - f.Base.Close() - } - if f.Layer != nil { - return f.Layer.Close() - } - return BADFD -} - -func (f *UnionFile) Read(s []byte) (int, error) { - if f.Layer != nil { - n, err := f.Layer.Read(s) - if (err == nil || err == io.EOF) && f.Base != nil { - // advance the file position also in the base file, the next - // call may be a write at this position (or a seek with SEEK_CUR) - if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { - // only overwrite err in case the seek fails: we need to - // report an eventual io.EOF to the caller - err = seekErr - } - } - return n, err - } - if f.Base != nil { - return f.Base.Read(s) - } - return 0, BADFD -} - -func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { - if f.Layer != nil { - n, err := f.Layer.ReadAt(s, o) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o+int64(n), os.SEEK_SET) - } - return n, err - } - if f.Base != nil { - return f.Base.ReadAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { - if f.Layer != nil { - pos, err = f.Layer.Seek(o, w) - if (err == nil || err == io.EOF) && f.Base != nil { - _, err = f.Base.Seek(o, w) - } - return pos, err - } - if f.Base != nil { - return f.Base.Seek(o, w) - } - return 0, BADFD -} - -func (f *UnionFile) Write(s []byte) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.Write(s) - if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? 
- _, err = f.Base.Write(s) - } - return n, err - } - if f.Base != nil { - return f.Base.Write(s) - } - return 0, BADFD -} - -func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteAt(s, o) - if err == nil && f.Base != nil { - _, err = f.Base.WriteAt(s, o) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteAt(s, o) - } - return 0, BADFD -} - -func (f *UnionFile) Name() string { - if f.Layer != nil { - return f.Layer.Name() - } - return f.Base.Name() -} - -// DirsMerger is how UnionFile weaves two directories together. -// It takes the FileInfo slices from the layer and the base and returns a -// single view. -type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) - -var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { - var files = make(map[string]os.FileInfo) - - for _, fi := range lofi { - files[fi.Name()] = fi - } - - for _, fi := range bofi { - if _, exists := files[fi.Name()]; !exists { - files[fi.Name()] = fi - } - } - - rfi := make([]os.FileInfo, len(files)) - - i := 0 - for _, fi := range files { - rfi[i] = fi - i++ - } - - return rfi, nil - -} - -// Readdir will weave the two directories together and -// return a single view of the overlayed directories -func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { - var merge DirsMerger = f.Merger - if merge == nil { - merge = defaultUnionMergeDirsFn - } - - if f.off == 0 { - var lfi []os.FileInfo - if f.Layer != nil { - lfi, err = f.Layer.Readdir(-1) - if err != nil { - return nil, err - } - } - - var bfi []os.FileInfo - if f.Base != nil { - bfi, err = f.Base.Readdir(-1) - if err != nil { - return nil, err - } - - } - merged, err := merge(lfi, bfi) - if err != nil { - return nil, err - } - f.files = append(f.files, merged...) - } - if c == -1 { - return f.files[f.off:], nil - } - defer func() { f.off += c }() - return f.files[f.off:c], nil -} - -func (f *UnionFile) Readdirnames(c int) ([]string, error) { - rfi, err := f.Readdir(c) - if err != nil { - return nil, err - } - var names []string - for _, fi := range rfi { - names = append(names, fi.Name()) - } - return names, nil -} - -func (f *UnionFile) Stat() (os.FileInfo, error) { - if f.Layer != nil { - return f.Layer.Stat() - } - if f.Base != nil { - return f.Base.Stat() - } - return nil, BADFD -} - -func (f *UnionFile) Sync() (err error) { - if f.Layer != nil { - err = f.Layer.Sync() - if err == nil && f.Base != nil { - err = f.Base.Sync() - } - return err - } - if f.Base != nil { - return f.Base.Sync() - } - return BADFD -} - -func (f *UnionFile) Truncate(s int64) (err error) { - if f.Layer != nil { - err = f.Layer.Truncate(s) - if err == nil && f.Base != nil { - err = f.Base.Truncate(s) - } - return err - } - if f.Base != nil { - return f.Base.Truncate(s) - } - return BADFD -} - -func (f *UnionFile) WriteString(s string) (n int, err error) { - if f.Layer != nil { - n, err = f.Layer.WriteString(s) - if err == nil && f.Base != nil { - _, err = f.Base.WriteString(s) - } - return n, err - } - if f.Base != nil { - return f.Base.WriteString(s) - } - return 0, BADFD -} - -func copyToLayer(base Fs, layer Fs, name string) error { - bfh, err := base.Open(name) - if err != nil { - return err - } - defer bfh.Close() - - // First make sure the directory exists - exists, err := Exists(layer, filepath.Dir(name)) - if err != nil { - return err - } - if !exists { - err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME? 
- if err != nil { - return err - } - } - - // Create the file on the overlay - lfh, err := layer.Create(name) - if err != nil { - return err - } - n, err := io.Copy(lfh, bfh) - if err != nil { - // If anything fails, clean up the file - layer.Remove(name) - lfh.Close() - return err - } - - bfi, err := bfh.Stat() - if err != nil || bfi.Size() != n { - layer.Remove(name) - lfh.Close() - return syscall.EIO - } - - err = lfh.Close() - if err != nil { - layer.Remove(name) - lfh.Close() - return err - } - return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime()) -} diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go deleted file mode 100644 index 4f253f481..000000000 --- a/vendor/github.com/spf13/afero/util.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright ©2015 Steve Francia -// Portions Copyright ©2015 The Hugo Authors -// Portions Copyright 2016-present Bjørn Erik Pedersen -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "unicode" - - "golang.org/x/text/transform" - "golang.org/x/text/unicode/norm" -) - -// Filepath separator defined by os.Separator. -const FilePathSeparator = string(filepath.Separator) - -// Takes a reader and a path and writes the content -func (a Afero) WriteReader(path string, r io.Reader) (err error) { - return WriteReader(a.Fs, path, r) -} - -func WriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - if err != os.ErrExist { - return err - } - } - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -// Same as WriteReader but checks to see if file/directory already exists. 
-func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) { - return SafeWriteReader(a.Fs, path, r) -} - -func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) { - dir, _ := filepath.Split(path) - ospath := filepath.FromSlash(dir) - - if ospath != "" { - err = fs.MkdirAll(ospath, 0777) // rwx, rw, r - if err != nil { - return - } - } - - exists, err := Exists(fs, path) - if err != nil { - return - } - if exists { - return fmt.Errorf("%v already exists", path) - } - - file, err := fs.Create(path) - if err != nil { - return - } - defer file.Close() - - _, err = io.Copy(file, r) - return -} - -func (a Afero) GetTempDir(subPath string) string { - return GetTempDir(a.Fs, subPath) -} - -// GetTempDir returns the default temp directory with trailing slash -// if subPath is not empty then it will be created recursively with mode 777 rwx rwx rwx -func GetTempDir(fs Fs, subPath string) string { - addSlash := func(p string) string { - if FilePathSeparator != p[len(p)-1:] { - p = p + FilePathSeparator - } - return p - } - dir := addSlash(os.TempDir()) - - if subPath != "" { - // preserve windows backslash :-( - if FilePathSeparator == "\\" { - subPath = strings.Replace(subPath, "\\", "____", -1) - } - dir = dir + UnicodeSanitize((subPath)) - if FilePathSeparator == "\\" { - dir = strings.Replace(dir, "____", "\\", -1) - } - - if exists, _ := Exists(fs, dir); exists { - return addSlash(dir) - } - - err := fs.MkdirAll(dir, 0777) - if err != nil { - panic(err) - } - dir = addSlash(dir) - } - return dir -} - -// Rewrite string to remove non-standard path characters -func UnicodeSanitize(s string) string { - source := []rune(s) - target := make([]rune, 0, len(source)) - - for _, r := range source { - if unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsMark(r) || - r == '.' || - r == '/' || - r == '\\' || - r == '_' || - r == '-' || - r == '%' || - r == ' ' || - r == '#' { - target = append(target, r) - } - } - - return string(target) -} - -// Transform characters with accents into plain forms. -func NeuterAccents(s string) string { - t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC) - result, _, _ := transform.String(t, string(s)) - - return result -} - -func isMn(r rune) bool { - return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks -} - -func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) { - return FileContainsBytes(a.Fs, filename, subslice) -} - -// Check if a file contains a specified byte slice. -func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslice), nil -} - -func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) { - return FileContainsAnyBytes(a.Fs, filename, subslices) -} - -// Check if a file contains any of the specified byte slices. -func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) { - f, err := fs.Open(filename) - if err != nil { - return false, err - } - defer f.Close() - - return readerContainsAny(f, subslices...), nil -} - -// readerContains reports whether any of the subslices is within r. 
-func readerContainsAny(r io.Reader, subslices ...[]byte) bool { - - if r == nil || len(subslices) == 0 { - return false - } - - largestSlice := 0 - - for _, sl := range subslices { - if len(sl) > largestSlice { - largestSlice = len(sl) - } - } - - if largestSlice == 0 { - return false - } - - bufflen := largestSlice * 4 - halflen := bufflen / 2 - buff := make([]byte, bufflen) - var err error - var n, i int - - for { - i++ - if i == 1 { - n, err = io.ReadAtLeast(r, buff[:halflen], halflen) - } else { - if i != 2 { - // shift left to catch overlapping matches - copy(buff[:], buff[halflen:]) - } - n, err = io.ReadAtLeast(r, buff[halflen:], halflen) - } - - if n > 0 { - for _, sl := range subslices { - if bytes.Contains(buff, sl) { - return true - } - } - } - - if err != nil { - break - } - } - return false -} - -func (a Afero) DirExists(path string) (bool, error) { - return DirExists(a.Fs, path) -} - -// DirExists checks if a path exists and is a directory. -func DirExists(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err == nil && fi.IsDir() { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func (a Afero) IsDir(path string) (bool, error) { - return IsDir(a.Fs, path) -} - -// IsDir checks if a given path is a directory. -func IsDir(fs Fs, path string) (bool, error) { - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - return fi.IsDir(), nil -} - -func (a Afero) IsEmpty(path string) (bool, error) { - return IsEmpty(a.Fs, path) -} - -// IsEmpty checks if a given file or directory is empty. -func IsEmpty(fs Fs, path string) (bool, error) { - if b, _ := Exists(fs, path); !b { - return false, fmt.Errorf("%q path does not exist", path) - } - fi, err := fs.Stat(path) - if err != nil { - return false, err - } - if fi.IsDir() { - f, err := fs.Open(path) - if err != nil { - return false, err - } - defer f.Close() - list, err := f.Readdir(-1) - return len(list) == 0, nil - } - return fi.Size() == 0, nil -} - -func (a Afero) Exists(path string) (bool, error) { - return Exists(a.Fs, path) -} - -// Check if a file or directory exists. -func Exists(fs Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { - combinedPath := filepath.Join(basePathFs.path, relativePath) - if parent, ok := basePathFs.source.(*BasePathFs); ok { - return FullBaseFsPath(parent, combinedPath) - } - - return combinedPath -} diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE deleted file mode 100644 index 4527efb9c..000000000 --- a/vendor/github.com/spf13/cast/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go deleted file mode 100644 index 8b8c208be..000000000 --- a/vendor/github.com/spf13/cast/cast.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Package cast provides easy and safe casting in Go. -package cast - -import "time" - -// ToBool casts an interface to a bool type. -func ToBool(i interface{}) bool { - v, _ := ToBoolE(i) - return v -} - -// ToTime casts an interface to a time.Time type. -func ToTime(i interface{}) time.Time { - v, _ := ToTimeE(i) - return v -} - -// ToDuration casts an interface to a time.Duration type. -func ToDuration(i interface{}) time.Duration { - v, _ := ToDurationE(i) - return v -} - -// ToFloat64 casts an interface to a float64 type. -func ToFloat64(i interface{}) float64 { - v, _ := ToFloat64E(i) - return v -} - -// ToFloat32 casts an interface to a float32 type. -func ToFloat32(i interface{}) float32 { - v, _ := ToFloat32E(i) - return v -} - -// ToInt64 casts an interface to an int64 type. -func ToInt64(i interface{}) int64 { - v, _ := ToInt64E(i) - return v -} - -// ToInt32 casts an interface to an int32 type. -func ToInt32(i interface{}) int32 { - v, _ := ToInt32E(i) - return v -} - -// ToInt16 casts an interface to an int16 type. -func ToInt16(i interface{}) int16 { - v, _ := ToInt16E(i) - return v -} - -// ToInt8 casts an interface to an int8 type. -func ToInt8(i interface{}) int8 { - v, _ := ToInt8E(i) - return v -} - -// ToInt casts an interface to an int type. -func ToInt(i interface{}) int { - v, _ := ToIntE(i) - return v -} - -// ToUint casts an interface to a uint type. -func ToUint(i interface{}) uint { - v, _ := ToUintE(i) - return v -} - -// ToUint64 casts an interface to a uint64 type. -func ToUint64(i interface{}) uint64 { - v, _ := ToUint64E(i) - return v -} - -// ToUint32 casts an interface to a uint32 type. -func ToUint32(i interface{}) uint32 { - v, _ := ToUint32E(i) - return v -} - -// ToUint16 casts an interface to a uint16 type. -func ToUint16(i interface{}) uint16 { - v, _ := ToUint16E(i) - return v -} - -// ToUint8 casts an interface to a uint8 type. -func ToUint8(i interface{}) uint8 { - v, _ := ToUint8E(i) - return v -} - -// ToString casts an interface to a string type. -func ToString(i interface{}) string { - v, _ := ToStringE(i) - return v -} - -// ToStringMapString casts an interface to a map[string]string type. -func ToStringMapString(i interface{}) map[string]string { - v, _ := ToStringMapStringE(i) - return v -} - -// ToStringMapStringSlice casts an interface to a map[string][]string type. -func ToStringMapStringSlice(i interface{}) map[string][]string { - v, _ := ToStringMapStringSliceE(i) - return v -} - -// ToStringMapBool casts an interface to a map[string]bool type. 
-func ToStringMapBool(i interface{}) map[string]bool { - v, _ := ToStringMapBoolE(i) - return v -} - -// ToStringMap casts an interface to a map[string]interface{} type. -func ToStringMap(i interface{}) map[string]interface{} { - v, _ := ToStringMapE(i) - return v -} - -// ToSlice casts an interface to a []interface{} type. -func ToSlice(i interface{}) []interface{} { - v, _ := ToSliceE(i) - return v -} - -// ToBoolSlice casts an interface to a []bool type. -func ToBoolSlice(i interface{}) []bool { - v, _ := ToBoolSliceE(i) - return v -} - -// ToStringSlice casts an interface to a []string type. -func ToStringSlice(i interface{}) []string { - v, _ := ToStringSliceE(i) - return v -} - -// ToIntSlice casts an interface to a []int type. -func ToIntSlice(i interface{}) []int { - v, _ := ToIntSliceE(i) - return v -} - -// ToDurationSlice casts an interface to a []time.Duration type. -func ToDurationSlice(i interface{}) []time.Duration { - v, _ := ToDurationSliceE(i) - return v -} diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go deleted file mode 100644 index 4fe192893..000000000 --- a/vendor/github.com/spf13/cast/caste.go +++ /dev/null @@ -1,1166 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package cast - -import ( - "encoding/json" - "errors" - "fmt" - "html/template" - "reflect" - "strconv" - "strings" - "time" -) - -var errNegativeNotAllowed = errors.New("unable to cast negative value") - -// ToTimeE casts an interface to a time.Time type. -func ToTimeE(i interface{}) (tim time.Time, err error) { - i = indirect(i) - - switch v := i.(type) { - case time.Time: - return v, nil - case string: - return StringToDate(v) - case int: - return time.Unix(int64(v), 0), nil - case int64: - return time.Unix(v, 0), nil - case int32: - return time.Unix(int64(v), 0), nil - case uint: - return time.Unix(int64(v), 0), nil - case uint64: - return time.Unix(int64(v), 0), nil - case uint32: - return time.Unix(int64(v), 0), nil - default: - return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i) - } -} - -// ToDurationE casts an interface to a time.Duration type. -func ToDurationE(i interface{}) (d time.Duration, err error) { - i = indirect(i) - - switch s := i.(type) { - case time.Duration: - return s, nil - case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8: - d = time.Duration(ToInt64(s)) - return - case float32, float64: - d = time.Duration(ToFloat64(s)) - return - case string: - if strings.ContainsAny(s, "nsuµmh") { - d, err = time.ParseDuration(s) - } else { - d, err = time.ParseDuration(s + "ns") - } - return - default: - err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i) - return - } -} - -// ToBoolE casts an interface to a bool type. -func ToBoolE(i interface{}) (bool, error) { - i = indirect(i) - - switch b := i.(type) { - case bool: - return b, nil - case nil: - return false, nil - case int: - if i.(int) != 0 { - return true, nil - } - return false, nil - case string: - return strconv.ParseBool(i.(string)) - default: - return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i) - } -} - -// ToFloat64E casts an interface to a float64 type. 
-func ToFloat64E(i interface{}) (float64, error) { - i = indirect(i) - - switch s := i.(type) { - case float64: - return s, nil - case float32: - return float64(s), nil - case int: - return float64(s), nil - case int64: - return float64(s), nil - case int32: - return float64(s), nil - case int16: - return float64(s), nil - case int8: - return float64(s), nil - case uint: - return float64(s), nil - case uint64: - return float64(s), nil - case uint32: - return float64(s), nil - case uint16: - return float64(s), nil - case uint8: - return float64(s), nil - case string: - v, err := strconv.ParseFloat(s, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i) - } -} - -// ToFloat32E casts an interface to a float32 type. -func ToFloat32E(i interface{}) (float32, error) { - i = indirect(i) - - switch s := i.(type) { - case float64: - return float32(s), nil - case float32: - return s, nil - case int: - return float32(s), nil - case int64: - return float32(s), nil - case int32: - return float32(s), nil - case int16: - return float32(s), nil - case int8: - return float32(s), nil - case uint: - return float32(s), nil - case uint64: - return float32(s), nil - case uint32: - return float32(s), nil - case uint16: - return float32(s), nil - case uint8: - return float32(s), nil - case string: - v, err := strconv.ParseFloat(s, 32) - if err == nil { - return float32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i) - } -} - -// ToInt64E casts an interface to an int64 type. -func ToInt64E(i interface{}) (int64, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int64(s), nil - case int64: - return s, nil - case int32: - return int64(s), nil - case int16: - return int64(s), nil - case int8: - return int64(s), nil - case uint: - return int64(s), nil - case uint64: - return int64(s), nil - case uint32: - return int64(s), nil - case uint16: - return int64(s), nil - case uint8: - return int64(s), nil - case float64: - return int64(s), nil - case float32: - return int64(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i) - } -} - -// ToInt32E casts an interface to an int32 type. 
-func ToInt32E(i interface{}) (int32, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int32(s), nil - case int64: - return int32(s), nil - case int32: - return s, nil - case int16: - return int32(s), nil - case int8: - return int32(s), nil - case uint: - return int32(s), nil - case uint64: - return int32(s), nil - case uint32: - return int32(s), nil - case uint16: - return int32(s), nil - case uint8: - return int32(s), nil - case float64: - return int32(s), nil - case float32: - return int32(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i) - } -} - -// ToInt16E casts an interface to an int16 type. -func ToInt16E(i interface{}) (int16, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int16(s), nil - case int64: - return int16(s), nil - case int32: - return int16(s), nil - case int16: - return s, nil - case int8: - return int16(s), nil - case uint: - return int16(s), nil - case uint64: - return int16(s), nil - case uint32: - return int16(s), nil - case uint16: - return int16(s), nil - case uint8: - return int16(s), nil - case float64: - return int16(s), nil - case float32: - return int16(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i) - } -} - -// ToInt8E casts an interface to an int8 type. -func ToInt8E(i interface{}) (int8, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return int8(s), nil - case int64: - return int8(s), nil - case int32: - return int8(s), nil - case int16: - return int8(s), nil - case int8: - return s, nil - case uint: - return int8(s), nil - case uint64: - return int8(s), nil - case uint32: - return int8(s), nil - case uint16: - return int8(s), nil - case uint8: - return int8(s), nil - case float64: - return int8(s), nil - case float32: - return int8(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i) - } -} - -// ToIntE casts an interface to an int type. 
-func ToIntE(i interface{}) (int, error) { - i = indirect(i) - - switch s := i.(type) { - case int: - return s, nil - case int64: - return int(s), nil - case int32: - return int(s), nil - case int16: - return int(s), nil - case int8: - return int(s), nil - case uint: - return int(s), nil - case uint64: - return int(s), nil - case uint32: - return int(s), nil - case uint16: - return int(s), nil - case uint8: - return int(s), nil - case float64: - return int(s), nil - case float32: - return int(s), nil - case string: - v, err := strconv.ParseInt(s, 0, 0) - if err == nil { - return int(v), nil - } - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i) - } -} - -// ToUintE casts an interface to a uint type. -func ToUintE(i interface{}) (uint, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 0) - if err == nil { - return uint(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case uint: - return s, nil - case uint64: - return uint(s), nil - case uint32: - return uint(s), nil - case uint16: - return uint(s), nil - case uint8: - return uint(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i) - } -} - -// ToUint64E casts an interface to a uint64 type. -func ToUint64E(i interface{}) (uint64, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 64) - if err == nil { - return v, nil - } - return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case uint: - return uint64(s), nil - case uint64: - return s, nil - case uint32: - return uint64(s), nil - case uint16: - return uint64(s), nil - case uint8: - return uint64(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint64(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i) - } -} - -// ToUint32E casts an interface to a uint32 type. 
-func ToUint32E(i interface{}) (uint32, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 32) - if err == nil { - return uint32(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case uint: - return uint32(s), nil - case uint64: - return uint32(s), nil - case uint32: - return s, nil - case uint16: - return uint32(s), nil - case uint8: - return uint32(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint32(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i) - } -} - -// ToUint16E casts an interface to a uint16 type. -func ToUint16E(i interface{}) (uint16, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 16) - if err == nil { - return uint16(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case uint: - return uint16(s), nil - case uint64: - return uint16(s), nil - case uint32: - return uint16(s), nil - case uint16: - return s, nil - case uint8: - return uint16(s), nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint16(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i) - } -} - -// ToUint8E casts an interface to a uint type. 
-func ToUint8E(i interface{}) (uint8, error) { - i = indirect(i) - - switch s := i.(type) { - case string: - v, err := strconv.ParseUint(s, 0, 8) - if err == nil { - return uint8(v), nil - } - return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err) - case int: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int16: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case int8: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case uint: - return uint8(s), nil - case uint64: - return uint8(s), nil - case uint32: - return uint8(s), nil - case uint16: - return uint8(s), nil - case uint8: - return s, nil - case float64: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case float32: - if s < 0 { - return 0, errNegativeNotAllowed - } - return uint8(s), nil - case bool: - if s { - return 1, nil - } - return 0, nil - case nil: - return 0, nil - default: - return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i) - } -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirect returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil). -func indirect(a interface{}) interface{} { - if a == nil { - return nil - } - if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr { - // Avoid creating a reflect.Value if it's not a pointer. - return a - } - v := reflect.ValueOf(a) - for v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// From html/template/content.go -// Copyright 2011 The Go Authors. All rights reserved. -// indirectToStringerOrError returns the value, after dereferencing as many times -// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer -// or error, -func indirectToStringerOrError(a interface{}) interface{} { - if a == nil { - return nil - } - - var errorType = reflect.TypeOf((*error)(nil)).Elem() - var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() - - v := reflect.ValueOf(a) - for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - return v.Interface() -} - -// ToStringE casts an interface to a string type. 
-func ToStringE(i interface{}) (string, error) { - i = indirectToStringerOrError(i) - - switch s := i.(type) { - case string: - return s, nil - case bool: - return strconv.FormatBool(s), nil - case float64: - return strconv.FormatFloat(s, 'f', -1, 64), nil - case float32: - return strconv.FormatFloat(float64(s), 'f', -1, 32), nil - case int: - return strconv.Itoa(s), nil - case int64: - return strconv.FormatInt(s, 10), nil - case int32: - return strconv.Itoa(int(s)), nil - case int16: - return strconv.FormatInt(int64(s), 10), nil - case int8: - return strconv.FormatInt(int64(s), 10), nil - case uint: - return strconv.FormatInt(int64(s), 10), nil - case uint64: - return strconv.FormatInt(int64(s), 10), nil - case uint32: - return strconv.FormatInt(int64(s), 10), nil - case uint16: - return strconv.FormatInt(int64(s), 10), nil - case uint8: - return strconv.FormatInt(int64(s), 10), nil - case []byte: - return string(s), nil - case template.HTML: - return string(s), nil - case template.URL: - return string(s), nil - case template.JS: - return string(s), nil - case template.CSS: - return string(s), nil - case template.HTMLAttr: - return string(s), nil - case nil: - return "", nil - case fmt.Stringer: - return s.String(), nil - case error: - return s.Error(), nil - default: - return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i) - } -} - -// ToStringMapStringE casts an interface to a map[string]string type. -func ToStringMapStringE(i interface{}) (map[string]string, error) { - var m = map[string]string{} - - switch v := i.(type) { - case map[string]string: - return v, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToString(val) - } - return m, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i) - } -} - -// ToStringMapStringSliceE casts an interface to a map[string][]string type. 
-func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) { - var m = map[string][]string{} - - switch v := i.(type) { - case map[string][]string: - return v, nil - case map[string][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[string]string: - for k, val := range v { - m[ToString(k)] = []string{val} - } - case map[string]interface{}: - for k, val := range v { - switch vt := val.(type) { - case []interface{}: - m[ToString(k)] = ToStringSlice(vt) - case []string: - m[ToString(k)] = vt - default: - m[ToString(k)] = []string{ToString(val)} - } - } - return m, nil - case map[interface{}][]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]string: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}][]interface{}: - for k, val := range v { - m[ToString(k)] = ToStringSlice(val) - } - return m, nil - case map[interface{}]interface{}: - for k, val := range v { - key, err := ToStringE(k) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - value, err := ToStringSliceE(val) - if err != nil { - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - m[key] = value - } - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i) - } - return m, nil -} - -// ToStringMapBoolE casts an interface to a map[string]bool type. -func ToStringMapBoolE(i interface{}) (map[string]bool, error) { - var m = map[string]bool{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]interface{}: - for k, val := range v { - m[ToString(k)] = ToBool(val) - } - return m, nil - case map[string]bool: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i) - } -} - -// ToStringMapE casts an interface to a map[string]interface{} type. -func ToStringMapE(i interface{}) (map[string]interface{}, error) { - var m = map[string]interface{}{} - - switch v := i.(type) { - case map[interface{}]interface{}: - for k, val := range v { - m[ToString(k)] = val - } - return m, nil - case map[string]interface{}: - return v, nil - case string: - err := jsonStringToObject(v, &m) - return m, err - default: - return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i) - } -} - -// ToSliceE casts an interface to a []interface{} type. -func ToSliceE(i interface{}) ([]interface{}, error) { - var s []interface{} - - switch v := i.(type) { - case []interface{}: - return append(s, v...), nil - case []map[string]interface{}: - for _, u := range v { - s = append(s, u) - } - return s, nil - default: - return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i) - } -} - -// ToBoolSliceE casts an interface to a []bool type. 
-func ToBoolSliceE(i interface{}) ([]bool, error) { - if i == nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - - switch v := i.(type) { - case []bool: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]bool, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToBoolE(s.Index(j).Interface()) - if err != nil { - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } - a[j] = val - } - return a, nil - default: - return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i) - } -} - -// ToStringSliceE casts an interface to a []string type. -func ToStringSliceE(i interface{}) ([]string, error) { - var a []string - - switch v := i.(type) { - case []interface{}: - for _, u := range v { - a = append(a, ToString(u)) - } - return a, nil - case []string: - return v, nil - case string: - return strings.Fields(v), nil - case interface{}: - str, err := ToStringE(v) - if err != nil { - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } - return []string{str}, nil - default: - return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i) - } -} - -// ToIntSliceE casts an interface to a []int type. -func ToIntSliceE(i interface{}) ([]int, error) { - if i == nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - - switch v := i.(type) { - case []int: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]int, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToIntE(s.Index(j).Interface()) - if err != nil { - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } - a[j] = val - } - return a, nil - default: - return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i) - } -} - -// ToDurationSliceE casts an interface to a []time.Duration type. -func ToDurationSliceE(i interface{}) ([]time.Duration, error) { - if i == nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - - switch v := i.(type) { - case []time.Duration: - return v, nil - } - - kind := reflect.TypeOf(i).Kind() - switch kind { - case reflect.Slice, reflect.Array: - s := reflect.ValueOf(i) - a := make([]time.Duration, s.Len()) - for j := 0; j < s.Len(); j++ { - val, err := ToDurationE(s.Index(j).Interface()) - if err != nil { - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } - a[j] = val - } - return a, nil - default: - return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i) - } -} - -// StringToDate attempts to parse a string into a time.Time type using a -// predefined list of formats. If no suitable format is found, an error is -// returned. 
-func StringToDate(s string) (time.Time, error) { - return parseDateWith(s, []string{ - time.RFC3339, - "2006-01-02T15:04:05", // iso8601 without timezone - time.RFC1123Z, - time.RFC1123, - time.RFC822Z, - time.RFC822, - time.RFC850, - time.ANSIC, - time.UnixDate, - time.RubyDate, - "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String() - "2006-01-02", - "02 Jan 2006", - "2006-01-02 15:04:05 -07:00", - "2006-01-02 15:04:05 -0700", - "2006-01-02 15:04:05Z07:00", // RFC3339 without T - "2006-01-02 15:04:05", - time.Kitchen, - time.Stamp, - time.StampMilli, - time.StampMicro, - time.StampNano, - }) -} - -func parseDateWith(s string, dates []string) (d time.Time, e error) { - for _, dateType := range dates { - if d, e = time.Parse(dateType, s); e == nil { - return - } - } - return d, fmt.Errorf("unable to parse date: %s", s) -} - -// jsonStringToObject attempts to unmarshall a string as JSON into -// the object passed as pointer. -func jsonStringToObject(s string, v interface{}) error { - data := []byte(s) - return json.Unmarshal(data, v) -} diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE deleted file mode 100644 index 4527efb9c..000000000 --- a/vendor/github.com/spf13/jwalterweatherman/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go deleted file mode 100644 index bcb763403..000000000 --- a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package jwalterweatherman - -import ( - "io" - "io/ioutil" - "log" - "os" -) - -var ( - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - defaultNotepad *Notepad -) - -func reloadDefaultNotepad() { - TRACE = defaultNotepad.TRACE - DEBUG = defaultNotepad.DEBUG - INFO = defaultNotepad.INFO - WARN = defaultNotepad.WARN - ERROR = defaultNotepad.ERROR - CRITICAL = defaultNotepad.CRITICAL - FATAL = defaultNotepad.FATAL - - LOG = defaultNotepad.LOG - FEEDBACK = defaultNotepad.FEEDBACK -} - -func init() { - defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime) - reloadDefaultNotepad() -} - -// SetLogThreshold set the log threshold for the default notepad. Trace by default. -func SetLogThreshold(threshold Threshold) { - defaultNotepad.SetLogThreshold(threshold) - reloadDefaultNotepad() -} - -// SetLogOutput set the log output for the default notepad. Discarded by default. -func SetLogOutput(handle io.Writer) { - defaultNotepad.SetLogOutput(handle) - reloadDefaultNotepad() -} - -// SetStdoutThreshold set the standard output threshold for the default notepad. -// Info by default. -func SetStdoutThreshold(threshold Threshold) { - defaultNotepad.SetStdoutThreshold(threshold) - reloadDefaultNotepad() -} - -// SetPrefix set the prefix for the default logger. Empty by default. -func SetPrefix(prefix string) { - defaultNotepad.SetPrefix(prefix) - reloadDefaultNotepad() -} - -// SetFlags set the flags for the default logger. "log.Ldate | log.Ltime" by default. -func SetFlags(flags int) { - defaultNotepad.SetFlags(flags) - reloadDefaultNotepad() -} - -// Level returns the current global log threshold. -func LogThreshold() Threshold { - return defaultNotepad.logThreshold -} - -// Level returns the current global output threshold. -func StdoutThreshold() Threshold { - return defaultNotepad.stdoutThreshold -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func GetLogThreshold() Threshold { - return defaultNotepad.GetLogThreshold() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func GetStdoutThreshold() Threshold { - return defaultNotepad.GetStdoutThreshold() -} - -// LogCountForLevel returns the number of log invocations for a given threshold. -func LogCountForLevel(l Threshold) uint64 { - return defaultNotepad.LogCountForLevel(l) -} - -// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations -// greater than or equal to a given threshold. -func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { - return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold) -} - -// ResetLogCounters resets the invocation counters for all levels. -func ResetLogCounters() { - defaultNotepad.ResetLogCounters() -} diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go deleted file mode 100644 index 11423ac41..000000000 --- a/vendor/github.com/spf13/jwalterweatherman/log_counter.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. 
- -package jwalterweatherman - -import ( - "sync/atomic" -) - -type logCounter struct { - counter uint64 -} - -func (c *logCounter) incr() { - atomic.AddUint64(&c.counter, 1) -} - -func (c *logCounter) resetCounter() { - atomic.StoreUint64(&c.counter, 0) -} - -func (c *logCounter) getCount() uint64 { - return atomic.LoadUint64(&c.counter) -} - -func (c *logCounter) Write(p []byte) (n int, err error) { - c.incr() - return len(p), nil -} - -// LogCountForLevel returns the number of log invocations for a given threshold. -func (n *Notepad) LogCountForLevel(l Threshold) uint64 { - return n.logCounters[l].getCount() -} - -// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations -// greater than or equal to a given threshold. -func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 { - var cnt uint64 - - for i := int(threshold); i < len(n.logCounters); i++ { - cnt += n.LogCountForLevel(Threshold(i)) - } - - return cnt -} - -// ResetLogCounters resets the invocation counters for all levels. -func (n *Notepad) ResetLogCounters() { - for _, np := range n.logCounters { - np.resetCounter() - } -} diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go deleted file mode 100644 index ae5aaf711..000000000 --- a/vendor/github.com/spf13/jwalterweatherman/notepad.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright © 2016 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -package jwalterweatherman - -import ( - "fmt" - "io" - "log" -) - -type Threshold int - -func (t Threshold) String() string { - return prefixes[t] -} - -const ( - LevelTrace Threshold = iota - LevelDebug - LevelInfo - LevelWarn - LevelError - LevelCritical - LevelFatal -) - -var prefixes map[Threshold]string = map[Threshold]string{ - LevelTrace: "TRACE", - LevelDebug: "DEBUG", - LevelInfo: "INFO", - LevelWarn: "WARN", - LevelError: "ERROR", - LevelCritical: "CRITICAL", - LevelFatal: "FATAL", -} - -// Notepad is where you leave a note! -type Notepad struct { - TRACE *log.Logger - DEBUG *log.Logger - INFO *log.Logger - WARN *log.Logger - ERROR *log.Logger - CRITICAL *log.Logger - FATAL *log.Logger - - LOG *log.Logger - FEEDBACK *Feedback - - loggers [7]**log.Logger - logHandle io.Writer - outHandle io.Writer - logThreshold Threshold - stdoutThreshold Threshold - prefix string - flags int - - // One per Threshold - logCounters [7]*logCounter -} - -// NewNotepad create a new notepad. -func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad { - n := &Notepad{} - - n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL} - n.outHandle = outHandle - n.logHandle = logHandle - n.stdoutThreshold = outThreshold - n.logThreshold = logThreshold - - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - - n.flags = flags - - n.LOG = log.New(n.logHandle, - "LOG: ", - n.flags) - n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG} - - n.init() - return n -} - -// init creates the loggers for each level depending on the notepad thresholds. 
-func (n *Notepad) init() { - logAndOut := io.MultiWriter(n.outHandle, n.logHandle) - - for t, logger := range n.loggers { - threshold := Threshold(t) - counter := &logCounter{} - n.logCounters[t] = counter - prefix := n.prefix + threshold.String() + " " - - switch { - case threshold >= n.logThreshold && threshold >= n.stdoutThreshold: - *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags) - - case threshold >= n.logThreshold: - *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags) - - case threshold >= n.stdoutThreshold: - *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags) - - default: - // counter doesn't care about prefix and flags, so don't use them - // for performance. - *logger = log.New(counter, "", 0) - } - } -} - -// SetLogThreshold changes the threshold above which messages are written to the -// log file. -func (n *Notepad) SetLogThreshold(threshold Threshold) { - n.logThreshold = threshold - n.init() -} - -// SetLogOutput changes the file where log messages are written. -func (n *Notepad) SetLogOutput(handle io.Writer) { - n.logHandle = handle - n.init() -} - -// GetStdoutThreshold returns the defined Treshold for the log logger. -func (n *Notepad) GetLogThreshold() Threshold { - return n.logThreshold -} - -// SetStdoutThreshold changes the threshold above which messages are written to the -// standard output. -func (n *Notepad) SetStdoutThreshold(threshold Threshold) { - n.stdoutThreshold = threshold - n.init() -} - -// GetStdoutThreshold returns the Treshold for the stdout logger. -func (n *Notepad) GetStdoutThreshold() Threshold { - return n.stdoutThreshold -} - -// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between -// brackets at the beginning of the line. An empty prefix won't be displayed at all. -func (n *Notepad) SetPrefix(prefix string) { - if len(prefix) != 0 { - n.prefix = "[" + prefix + "] " - } else { - n.prefix = "" - } - n.init() -} - -// SetFlags choose which flags the logger will display (after prefix and message -// level). See the package log for more informations on this. -func (n *Notepad) SetFlags(flags int) { - n.flags = flags - n.init() -} - -// Feedback writes plainly to the outHandle while -// logging with the standard extra information (date, file, etc). 
-type Feedback struct { - out *log.Logger - log *log.Logger -} - -func (fb *Feedback) Println(v ...interface{}) { - fb.output(fmt.Sprintln(v...)) -} - -func (fb *Feedback) Printf(format string, v ...interface{}) { - fb.output(fmt.Sprintf(format, v...)) -} - -func (fb *Feedback) Print(v ...interface{}) { - fb.output(fmt.Sprint(v...)) -} - -func (fb *Feedback) output(s string) { - if fb.out != nil { - fb.out.Output(2, s) - } - if fb.log != nil { - fb.log.Output(2, s) - } -} diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE deleted file mode 100644 index 4527efb9c..000000000 --- a/vendor/github.com/spf13/viper/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Steve Francia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go deleted file mode 100644 index dd32f4e1c..000000000 --- a/vendor/github.com/spf13/viper/flags.go +++ /dev/null @@ -1,57 +0,0 @@ -package viper - -import "github.com/spf13/pflag" - -// FlagValueSet is an interface that users can implement -// to bind a set of flags to viper. -type FlagValueSet interface { - VisitAll(fn func(FlagValue)) -} - -// FlagValue is an interface that users can implement -// to bind different flags to viper. -type FlagValue interface { - HasChanged() bool - Name() string - ValueString() string - ValueType() string -} - -// pflagValueSet is a wrapper around *pflag.ValueSet -// that implements FlagValueSet. -type pflagValueSet struct { - flags *pflag.FlagSet -} - -// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet. -func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) { - p.flags.VisitAll(func(flag *pflag.Flag) { - fn(pflagValue{flag}) - }) -} - -// pflagValue is a wrapper aroung *pflag.flag -// that implements FlagValue -type pflagValue struct { - flag *pflag.Flag -} - -// HasChanges returns whether the flag has changes or not. -func (p pflagValue) HasChanged() bool { - return p.flag.Changed -} - -// Name returns the name of the flag. -func (p pflagValue) Name() string { - return p.flag.Name -} - -// ValueString returns the value of the flag as a string. -func (p pflagValue) ValueString() string { - return p.flag.Value.String() -} - -// ValueType returns the type of the flag as a string. 
-func (p pflagValue) ValueType() string { - return p.flag.Value.Type() -} diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go deleted file mode 100644 index 952cad44c..000000000 --- a/vendor/github.com/spf13/viper/util.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is a application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -package viper - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - "unicode" - - "github.com/spf13/afero" - "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" -) - -// ConfigParseError denotes failing to parse configuration file. -type ConfigParseError struct { - err error -} - -// Error returns the formatted configuration error. -func (pe ConfigParseError) Error() string { - return fmt.Sprintf("While parsing config: %s", pe.err.Error()) -} - -// toCaseInsensitiveValue checks if the value is a map; -// if so, create a copy and lower-case the keys recursively. -func toCaseInsensitiveValue(value interface{}) interface{} { - switch v := value.(type) { - case map[interface{}]interface{}: - value = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - value = copyAndInsensitiviseMap(v) - } - - return value -} - -// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of -// any map it makes case insensitive. -func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { - nm := make(map[string]interface{}) - - for key, val := range m { - lkey := strings.ToLower(key) - switch v := val.(type) { - case map[interface{}]interface{}: - nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) - case map[string]interface{}: - nm[lkey] = copyAndInsensitiviseMap(v) - default: - nm[lkey] = v - } - } - - return nm -} - -func insensitiviseMap(m map[string]interface{}) { - for key, val := range m { - switch val.(type) { - case map[interface{}]interface{}: - // nested map: cast and recursively insensitivise - val = cast.ToStringMap(val) - insensitiviseMap(val.(map[string]interface{})) - case map[string]interface{}: - // nested map: recursively insensitivise - insensitiviseMap(val.(map[string]interface{})) - } - - lower := strings.ToLower(key) - if key != lower { - // remove old key (not lower-cased) - delete(m, key) - } - // update map - m[lower] = val - } -} - -func absPathify(inPath string) string { - jww.INFO.Println("Trying to resolve absolute path to", inPath) - - if strings.HasPrefix(inPath, "$HOME") { - inPath = userHomeDir() + inPath[5:] - } - - if strings.HasPrefix(inPath, "$") { - end := strings.Index(inPath, string(os.PathSeparator)) - inPath = os.Getenv(inPath[1:end]) + inPath[end:] - } - - if filepath.IsAbs(inPath) { - return filepath.Clean(inPath) - } - - p, err := filepath.Abs(inPath) - if err == nil { - return filepath.Clean(p) - } - - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) - return "" -} - -// Check if File / Directory Exists -func exists(fs afero.Fs, path string) (bool, error) { - _, err := fs.Stat(path) - if err == nil { - return true, nil - } - if os.IsNotExist(err) { - return false, nil - } - return false, err -} - -func stringInSlice(a string, list []string) bool { - for _, b := 
range list { - if b == a { - return true - } - } - return false -} - -func userHomeDir() string { - if runtime.GOOS == "windows" { - home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH") - if home == "" { - home = os.Getenv("USERPROFILE") - } - return home - } - return os.Getenv("HOME") -} - -func safeMul(a, b uint) uint { - c := a * b - if a > 1 && b > 1 && c/b != a { - return 0 - } - return c -} - -// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes -func parseSizeInBytes(sizeStr string) uint { - sizeStr = strings.TrimSpace(sizeStr) - lastChar := len(sizeStr) - 1 - multiplier := uint(1) - - if lastChar > 0 { - if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' { - if lastChar > 1 { - switch unicode.ToLower(rune(sizeStr[lastChar-1])) { - case 'k': - multiplier = 1 << 10 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'm': - multiplier = 1 << 20 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - case 'g': - multiplier = 1 << 30 - sizeStr = strings.TrimSpace(sizeStr[:lastChar-1]) - default: - multiplier = 1 - sizeStr = strings.TrimSpace(sizeStr[:lastChar]) - } - } - } - } - - size := cast.ToInt(sizeStr) - if size < 0 { - size = 0 - } - - return safeMul(uint(size), multiplier) -} - -// deepSearch scans deep maps, following the key indexes listed in the -// sequence "path". -// The last value is expected to be another map, and is returned. -// -// In case intermediate keys do not exist, or map to a non-map value, -// a new map is created and inserted, and the search continues from there: -// the initial map "m" may be modified! -func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { - for _, k := range path { - m2, ok := m[k] - if !ok { - // intermediate key does not exist - // => create it and continue from there - m3 := make(map[string]interface{}) - m[k] = m3 - m = m3 - continue - } - m3, ok := m2.(map[string]interface{}) - if !ok { - // intermediate key is a value - // => replace with a new map - m3 = make(map[string]interface{}) - m[k] = m3 - } - // continue search from here - m = m3 - } - return m -} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go deleted file mode 100644 index e9966ba58..000000000 --- a/vendor/github.com/spf13/viper/viper.go +++ /dev/null @@ -1,1771 +0,0 @@ -// Copyright © 2014 Steve Francia . -// -// Use of this source code is governed by an MIT-style -// license that can be found in the LICENSE file. - -// Viper is a application configuration system. -// It believes that applications can be configured a variety of ways -// via flags, ENVIRONMENT variables, configuration files retrieved -// from the file system, or a remote key/value store. - -// Each item takes precedence over the item below it: - -// overrides -// flag -// env -// config -// key/value store -// default - -package viper - -import ( - "bytes" - "encoding/csv" - "encoding/json" - "fmt" - "io" - "log" - "os" - "path/filepath" - "reflect" - "strings" - "time" - - yaml "gopkg.in/yaml.v2" - - "github.com/fsnotify/fsnotify" - "github.com/hashicorp/hcl" - "github.com/hashicorp/hcl/hcl/printer" - "github.com/magiconair/properties" - "github.com/mitchellh/mapstructure" - toml "github.com/pelletier/go-toml" - "github.com/spf13/afero" - "github.com/spf13/cast" - jww "github.com/spf13/jwalterweatherman" - "github.com/spf13/pflag" -) - -// ConfigMarshalError happens when failing to marshal the configuration. 
-type ConfigMarshalError struct { - err error -} - -// Error returns the formatted configuration error. -func (e ConfigMarshalError) Error() string { - return fmt.Sprintf("While marshaling config: %s", e.err.Error()) -} - -var v *Viper - -type RemoteResponse struct { - Value []byte - Error error -} - -func init() { - v = New() -} - -type remoteConfigFactory interface { - Get(rp RemoteProvider) (io.Reader, error) - Watch(rp RemoteProvider) (io.Reader, error) - WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) -} - -// RemoteConfig is optional, see the remote package -var RemoteConfig remoteConfigFactory - -// UnsupportedConfigError denotes encountering an unsupported -// configuration filetype. -type UnsupportedConfigError string - -// Error returns the formatted configuration error. -func (str UnsupportedConfigError) Error() string { - return fmt.Sprintf("Unsupported Config Type %q", string(str)) -} - -// UnsupportedRemoteProviderError denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are supported. -type UnsupportedRemoteProviderError string - -// Error returns the formatted remote provider error. -func (str UnsupportedRemoteProviderError) Error() string { - return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) -} - -// RemoteConfigError denotes encountering an error while trying to -// pull the configuration from the remote provider. -type RemoteConfigError string - -// Error returns the formatted remote provider error -func (rce RemoteConfigError) Error() string { - return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) -} - -// ConfigFileNotFoundError denotes failing to find configuration file. -type ConfigFileNotFoundError struct { - name, locations string -} - -// Error returns the formatted configuration error. -func (fnfe ConfigFileNotFoundError) Error() string { - return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) -} - -// Viper is a prioritized configuration registry. It -// maintains a set of configuration sources, fetches -// values to populate those, and provides them according -// to the source's priority. -// The priority of the sources is the following: -// 1. overrides -// 2. flags -// 3. env. variables -// 4. config file -// 5. key/value store -// 6. defaults -// -// For example, if values from the following sources were loaded: -// -// Defaults : { -// "secret": "", -// "user": "default", -// "endpoint": "https://localhost" -// } -// Config : { -// "user": "root" -// "secret": "defaultsecret" -// } -// Env : { -// "secret": "somesecretkey" -// } -// -// The resulting config will have the following values: -// -// { -// "secret": "somesecretkey", -// "user": "root", -// "endpoint": "https://localhost" -// } -type Viper struct { - // Delimiter that separates a list of keys - // used to access a nested value in one go - keyDelim string - - // A set of paths to look for the config file in - configPaths []string - - // The filesystem to read config from. 
- fs afero.Fs - - // A set of remote providers to search for the configuration - remoteProviders []*defaultRemoteProvider - - // Name of file to look for inside the path - configName string - configFile string - configType string - envPrefix string - - automaticEnvApplied bool - envKeyReplacer *strings.Replacer - - config map[string]interface{} - override map[string]interface{} - defaults map[string]interface{} - kvstore map[string]interface{} - pflags map[string]FlagValue - env map[string]string - aliases map[string]string - typeByDefValue bool - - // Store read properties on the object so that we can write back in order with comments. - // This will only be used if the configuration read is a properties file. - properties *properties.Properties - - onConfigChange func(fsnotify.Event) -} - -// New returns an initialized Viper instance. -func New() *Viper { - v := new(Viper) - v.keyDelim = "." - v.configName = "config" - v.fs = afero.NewOsFs() - v.config = make(map[string]interface{}) - v.override = make(map[string]interface{}) - v.defaults = make(map[string]interface{}) - v.kvstore = make(map[string]interface{}) - v.pflags = make(map[string]FlagValue) - v.env = make(map[string]string) - v.aliases = make(map[string]string) - v.typeByDefValue = false - - return v -} - -// Intended for testing, will reset all to default settings. -// In the public interface for the viper package so applications -// can use it in their testing as well. -func Reset() { - v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} - SupportedRemoteProviders = []string{"etcd", "consul"} -} - -type defaultRemoteProvider struct { - provider string - endpoint string - path string - secretKeyring string -} - -func (rp defaultRemoteProvider) Provider() string { - return rp.provider -} - -func (rp defaultRemoteProvider) Endpoint() string { - return rp.endpoint -} - -func (rp defaultRemoteProvider) Path() string { - return rp.path -} - -func (rp defaultRemoteProvider) SecretKeyring() string { - return rp.secretKeyring -} - -// RemoteProvider stores the configuration necessary -// to connect to a remote key/value store. -// Optional secretKeyring to unencrypt encrypted values -// can be provided. -type RemoteProvider interface { - Provider() string - Endpoint() string - Path() string - SecretKeyring() string -} - -// SupportedExts are universally supported extensions. -var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} - -// SupportedRemoteProviders are universally supported remote providers. 
-var SupportedRemoteProviders = []string{"etcd", "consul"} - -func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } -func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { - v.onConfigChange = run -} - -func WatchConfig() { v.WatchConfig() } -func (v *Viper) WatchConfig() { - go func() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - filename, err := v.getConfigFile() - if err != nil { - log.Println("error:", err) - return - } - - configFile := filepath.Clean(filename) - configDir, _ := filepath.Split(configFile) - - done := make(chan bool) - go func() { - for { - select { - case event := <-watcher.Events: - // we only care about the config file - if filepath.Clean(event.Name) == configFile { - if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create { - err := v.ReadInConfig() - if err != nil { - log.Println("error:", err) - } - v.onConfigChange(event) - } - } - case err := <-watcher.Errors: - log.Println("error:", err) - } - } - }() - - watcher.Add(configDir) - <-done - }() -} - -// SetConfigFile explicitly defines the path, name and extension of the config file. -// Viper will use this and not check any of the config paths. -func SetConfigFile(in string) { v.SetConfigFile(in) } -func (v *Viper) SetConfigFile(in string) { - if in != "" { - v.configFile = in - } -} - -// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry will look for env -// variables that start with "SPF_". -func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } -func (v *Viper) SetEnvPrefix(in string) { - if in != "" { - v.envPrefix = in - } -} - -func (v *Viper) mergeWithEnvPrefix(in string) string { - if v.envPrefix != "" { - return strings.ToUpper(v.envPrefix + "_" + in) - } - - return strings.ToUpper(in) -} - -// TODO: should getEnv logic be moved into find(). Can generalize the use of -// rewriting keys many things, Ex: Get('someKey') -> some_key -// (camel case to snake case for JSON keys perhaps) - -// getEnv is a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys than the config object -// keys. -func (v *Viper) getEnv(key string) string { - if v.envKeyReplacer != nil { - key = v.envKeyReplacer.Replace(key) - } - return os.Getenv(key) -} - -// ConfigFileUsed returns the file used to populate the config registry. -func ConfigFileUsed() string { return v.ConfigFileUsed() } -func (v *Viper) ConfigFileUsed() string { return v.configFile } - -// AddConfigPath adds a path for Viper to search for the config file in. -// Can be called multiple times to define multiple search paths. -func AddConfigPath(in string) { v.AddConfigPath(in) } -func (v *Viper) AddConfigPath(in string) { - if in != "" { - absin := absPathify(in) - jww.INFO.Println("adding", absin, "to paths to search") - if !stringInSlice(absin, v.configPaths) { - v.configPaths = append(v.configPaths, absin) - } - } -} - -// AddRemoteProvider adds a remote configuration source. -// Remote Providers are searched in the order they are added. -// provider is a string value, "etcd" or "consul" are currently supported. -// endpoint is the url. 
etcd requires http://ip:port consul requires ip:port -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -func AddRemoteProvider(provider, endpoint, path string) error { - return v.AddRemoteProvider(provider, endpoint, path) -} -func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -// AddSecureRemoteProvider adds a remote configuration source. -// Secure Remote Providers are searched in the order they are added. -// provider is a string value, "etcd" or "consul" are currently supported. -// endpoint is the url. etcd requires http://ip:port consul requires ip:port -// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg -// path is the path in the k/v store to retrieve configuration -// To retrieve a config file called myapp.json from /configs/myapp.json -// you should set path to /configs and set config name (SetConfigName()) to -// "myapp" -// Secure Remote Providers are implemented with github.com/xordataexchange/crypt -func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring) -} - -func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error { - if !stringInSlice(provider, SupportedRemoteProviders) { - return UnsupportedRemoteProviderError(provider) - } - if provider != "" && endpoint != "" { - jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint) - rp := &defaultRemoteProvider{ - endpoint: endpoint, - provider: provider, - path: path, - secretKeyring: secretkeyring, - } - if !v.providerPathExists(rp) { - v.remoteProviders = append(v.remoteProviders, rp) - } - } - return nil -} - -func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { - for _, y := range v.remoteProviders { - if reflect.DeepEqual(y, p) { - return true - } - } - return false -} - -// searchMap recursively searches for a value for path in source map. -// Returns nil if not found. -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { - if len(path) == 0 { - return source - } - - next, ok := source[path[0]] - if ok { - // Fast path - if len(path) == 1 { - return next - } - - // Nested case - switch next.(type) { - case map[interface{}]interface{}: - return v.searchMap(cast.ToStringMap(next), path[1:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - return v.searchMap(next.(map[string]interface{}), path[1:]) - default: - // got a value but nested key expected, return "nil" for not found - return nil - } - } - return nil -} - -// searchMapWithPathPrefixes recursively searches for a value for path in source map. 
-// -// While searchMap() considers each path element as a single map key, this -// function searches for, and prioritizes, merged path elements. -// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" -// is also defined, this latter value is returned for path ["foo", "bar"]. -// -// This should be useful only at config level (other maps may not contain dots -// in their keys). -// -// Note: This assumes that the path entries and map keys are lower cased. -func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { - if len(path) == 0 { - return source - } - - // search for path prefixes, starting from the longest one - for i := len(path); i > 0; i-- { - prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) - - next, ok := source[prefixKey] - if ok { - // Fast path - if i == len(path) { - return next - } - - // Nested case - var val interface{} - switch next.(type) { - case map[interface{}]interface{}: - val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) - case map[string]interface{}: - // Type assertion is safe here since it is only reached - // if the type of `next` is the same as the type being asserted - val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) - default: - // got a value but nested key expected, do nothing and look for next prefix - } - if val != nil { - return val - } - } - } - - // not found - return nil -} - -// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere -// on its path in the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { - var parentVal interface{} - for i := 1; i < len(path); i++ { - parentVal = v.searchMap(m, path[0:i]) - if parentVal == nil { - // not found, no need to add more path elements - return "" - } - switch parentVal.(type) { - case map[interface{}]interface{}: - continue - case map[string]interface{}: - continue - default: - // parentVal is a regular value which shadows "path" - return strings.Join(path[0:i], v.keyDelim) - } - } - return "" -} - -// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere -// in a sub-path of the map. -// e.g., if "foo.bar" has a value in the given map, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { - // unify input map - var m map[string]interface{} - switch mi.(type) { - case map[string]string, map[string]FlagValue: - m = cast.ToStringMap(mi) - default: - return "" - } - - // scan paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if _, ok := m[parentKey]; ok { - return parentKey - } - } - return "" -} - -// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere -// in the environment, when automatic env is on. 
-// e.g., if "foo.bar" has a value in the environment, it “shadows” -// "foo.bar.baz" in a lower-priority map -func (v *Viper) isPathShadowedInAutoEnv(path []string) string { - var parentKey string - var val string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" { - return parentKey - } - } - return "" -} - -// SetTypeByDefaultValue enables or disables the inference of a key value's -// type when the Get function is used based upon a key's default value as -// opposed to the value returned based on the normal fetch logic. -// -// For example, if a key has a default value of []string{} and the same key -// is set via an environment variable to "a b c", a call to the Get function -// would return a string slice for the key if the key's type is inferred by -// the default value and the Get function would return: -// -// []string {"a", "b", "c"} -// -// Otherwise the Get function would return: -// -// "a b c" -func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) } -func (v *Viper) SetTypeByDefaultValue(enable bool) { - v.typeByDefValue = enable -} - -// GetViper gets the global Viper instance. -func GetViper() *Viper { - return v -} - -// Get can retrieve any value given the key to use. -// Get is case-insensitive for a key. -// Get has the behavior of returning the value associated with the first -// place from where it is set. Viper will check in the following order: -// override, flag, env, config file, key/value store, default -// -// Get returns an interface. For a specific value use one of the Get____ methods. -func Get(key string) interface{} { return v.Get(key) } -func (v *Viper) Get(key string) interface{} { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey) - if val == nil { - return nil - } - - if v.typeByDefValue { - // TODO(bep) this branch isn't covered by a single test. - valType := val - path := strings.Split(lcaseKey, v.keyDelim) - defVal := v.searchMap(v.defaults, path) - if defVal != nil { - valType = defVal - } - - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int64, int32, int16, int8, int: - return cast.ToInt(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) - } - } - - return val -} - -// Sub returns new Viper instance representing a sub tree of this instance. -// Sub is case-insensitive for a key. -func Sub(key string) *Viper { return v.Sub(key) } -func (v *Viper) Sub(key string) *Viper { - subv := New() - data := v.Get(key) - if data == nil { - return nil - } - - if reflect.TypeOf(data).Kind() == reflect.Map { - subv.config = cast.ToStringMap(data) - return subv - } - return nil -} - -// GetString returns the value associated with the key as a string. -func GetString(key string) string { return v.GetString(key) } -func (v *Viper) GetString(key string) string { - return cast.ToString(v.Get(key)) -} - -// GetBool returns the value associated with the key as a boolean. -func GetBool(key string) bool { return v.GetBool(key) } -func (v *Viper) GetBool(key string) bool { - return cast.ToBool(v.Get(key)) -} - -// GetInt returns the value associated with the key as an integer. 
-func GetInt(key string) int { return v.GetInt(key) } -func (v *Viper) GetInt(key string) int { - return cast.ToInt(v.Get(key)) -} - -// GetInt64 returns the value associated with the key as an integer. -func GetInt64(key string) int64 { return v.GetInt64(key) } -func (v *Viper) GetInt64(key string) int64 { - return cast.ToInt64(v.Get(key)) -} - -// GetFloat64 returns the value associated with the key as a float64. -func GetFloat64(key string) float64 { return v.GetFloat64(key) } -func (v *Viper) GetFloat64(key string) float64 { - return cast.ToFloat64(v.Get(key)) -} - -// GetTime returns the value associated with the key as time. -func GetTime(key string) time.Time { return v.GetTime(key) } -func (v *Viper) GetTime(key string) time.Time { - return cast.ToTime(v.Get(key)) -} - -// GetDuration returns the value associated with the key as a duration. -func GetDuration(key string) time.Duration { return v.GetDuration(key) } -func (v *Viper) GetDuration(key string) time.Duration { - return cast.ToDuration(v.Get(key)) -} - -// GetStringSlice returns the value associated with the key as a slice of strings. -func GetStringSlice(key string) []string { return v.GetStringSlice(key) } -func (v *Viper) GetStringSlice(key string) []string { - return cast.ToStringSlice(v.Get(key)) -} - -// GetStringMap returns the value associated with the key as a map of interfaces. -func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } -func (v *Viper) GetStringMap(key string) map[string]interface{} { - return cast.ToStringMap(v.Get(key)) -} - -// GetStringMapString returns the value associated with the key as a map of strings. -func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } -func (v *Viper) GetStringMapString(key string) map[string]string { - return cast.ToStringMapString(v.Get(key)) -} - -// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. -func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } -func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { - return cast.ToStringMapStringSlice(v.Get(key)) -} - -// GetSizeInBytes returns the size of the value associated with the given key -// in bytes. -func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } -func (v *Viper) GetSizeInBytes(key string) uint { - sizeStr := cast.ToString(v.Get(key)) - return parseSizeInBytes(sizeStr) -} - -// UnmarshalKey takes a single key and unmarshals it into a Struct. -func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } -func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { - err := decode(v.Get(key), defaultDecoderConfig(rawVal)) - - if err != nil { - return err - } - - v.insensitiviseMaps() - - return nil -} - -// Unmarshal unmarshals the config into a Struct. Make sure that the tags -// on the fields of the structure are properly set. 
-func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) } -func (v *Viper) Unmarshal(rawVal interface{}) error { - err := decode(v.AllSettings(), defaultDecoderConfig(rawVal)) - - if err != nil { - return err - } - - v.insensitiviseMaps() - - return nil -} - -// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot -// of time.Duration values & string slices -func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig { - return &mapstructure.DecoderConfig{ - Metadata: nil, - Result: output, - WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - ), - } -} - -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -func decode(input interface{}, config *mapstructure.DecoderConfig) error { - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - return decoder.Decode(input) -} - -// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent -// in the destination struct. -func (v *Viper) UnmarshalExact(rawVal interface{}) error { - config := defaultDecoderConfig(rawVal) - config.ErrorUnused = true - - err := decode(v.AllSettings(), config) - - if err != nil { - return err - } - - v.insensitiviseMaps() - - return nil -} - -// BindPFlags binds a full flag set to the configuration, using each flag's long -// name as the config key. -func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } -func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { - return v.BindFlagValues(pflagValueSet{flags}) -} - -// BindPFlag binds a specific key to a pflag (as used by cobra). -// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) -// -func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { - return v.BindFlagValue(key, pflagValue{flag}) -} - -// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long -// name as the config key. -func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } -func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { - flags.VisitAll(func(flag FlagValue) { - if err = v.BindFlagValue(flag.Name(), flag); err != nil { - return - } - }) - return nil -} - -// BindFlagValue binds a specific key to a FlagValue. -// Example (where serverCmd is a Cobra instance): -// -// serverCmd.Flags().Int("port", 1138, "Port to run Application server on") -// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port")) -// -func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } -func (v *Viper) BindFlagValue(key string, flag FlagValue) error { - if flag == nil { - return fmt.Errorf("flag for %q is nil", key) - } - v.pflags[strings.ToLower(key)] = flag - return nil -} - -// BindEnv binds a Viper key to a ENV variable. -// ENV variables are case sensitive. -// If only a key is provided, it will use the env key matching the key, uppercased. -// EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) error { return v.BindEnv(input...) 
} -func (v *Viper) BindEnv(input ...string) error { - var key, envkey string - if len(input) == 0 { - return fmt.Errorf("BindEnv missing key to bind to") - } - - key = strings.ToLower(input[0]) - - if len(input) == 1 { - envkey = v.mergeWithEnvPrefix(key) - } else { - envkey = input[1] - } - - v.env[key] = envkey - - return nil -} - -// Given a key, find the value. -// Viper will check in the following order: -// flag, env, config file, key/value store, default. -// Viper will check to see if an alias exists first. -// Note: this assumes a lower-cased key given. -func (v *Viper) find(lcaseKey string) interface{} { - - var ( - val interface{} - exists bool - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - ) - - // compute the path through the nested maps to the nested value - if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { - return nil - } - - // if the requested key is an alias, then return the proper key - lcaseKey = v.realKey(lcaseKey) - path = strings.Split(lcaseKey, v.keyDelim) - nested = len(path) > 1 - - // Set() override first - val = v.searchMap(v.override, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { - return nil - } - - // PFlag override next - flag, exists := v.pflags[lcaseKey] - if exists && flag.HasChanged() { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - default: - return flag.ValueString() - } - } - if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { - return nil - } - - // Env override next - if v.automaticEnvApplied { - // even if it hasn't been registered, if automaticEnv is used, - // check any Get request - if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" { - return val - } - if nested && v.isPathShadowedInAutoEnv(path) != "" { - return nil - } - } - envkey, exists := v.env[lcaseKey] - if exists { - if val = v.getEnv(envkey); val != "" { - return val - } - } - if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { - return nil - } - - // Config file next - val = v.searchMapWithPathPrefixes(v.config, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { - return nil - } - - // K/V store next - val = v.searchMap(v.kvstore, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { - return nil - } - - // Default next - val = v.searchMap(v.defaults, path) - if val != nil { - return val - } - if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { - return nil - } - - // last chance: if no other value is returned and a flag does exist for the value, - // get the flag's value even if the flag's value has not changed - if flag, exists := v.pflags[lcaseKey]; exists { - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - return cast.ToInt(flag.ValueString()) - case "bool": - return cast.ToBool(flag.ValueString()) - case "stringSlice": - s := strings.TrimPrefix(flag.ValueString(), "[") - s = strings.TrimSuffix(s, "]") - res, _ := readAsCSV(s) - return res - default: - return flag.ValueString() - } - } - // last item, no need to check shadowing - - return nil -} - -func readAsCSV(val string) ([]string, error) { - if val == "" 
{ - return []string{}, nil - } - stringReader := strings.NewReader(val) - csvReader := csv.NewReader(stringReader) - return csvReader.Read() -} - -// IsSet checks to see if the key has been set in any of the data locations. -// IsSet is case-insensitive for a key. -func IsSet(key string) bool { return v.IsSet(key) } -func (v *Viper) IsSet(key string) bool { - lcaseKey := strings.ToLower(key) - val := v.find(lcaseKey) - return val != nil -} - -// AutomaticEnv has Viper check ENV variables for all. -// keys set in config, default & flags -func AutomaticEnv() { v.AutomaticEnv() } -func (v *Viper) AutomaticEnv() { - v.automaticEnvApplied = true -} - -// SetEnvKeyReplacer sets the strings.Replacer on the viper object -// Useful for mapping an environmental variable to a key that does -// not match it. -func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) } -func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) { - v.envKeyReplacer = r -} - -// Aliases provide another accessor for the same key. -// This enables one to change a name without breaking the application -func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) } -func (v *Viper) RegisterAlias(alias string, key string) { - v.registerAlias(alias, strings.ToLower(key)) -} - -func (v *Viper) registerAlias(alias string, key string) { - alias = strings.ToLower(alias) - if alias != key && alias != v.realKey(key) { - _, exists := v.aliases[alias] - - if !exists { - // if we alias something that exists in one of the maps to another - // name, we'll never be able to get that value using the original - // name, so move the config value to the new realkey. - if val, ok := v.config[alias]; ok { - delete(v.config, alias) - v.config[key] = val - } - if val, ok := v.kvstore[alias]; ok { - delete(v.kvstore, alias) - v.kvstore[key] = val - } - if val, ok := v.defaults[alias]; ok { - delete(v.defaults, alias) - v.defaults[key] = val - } - if val, ok := v.override[alias]; ok { - delete(v.override, alias) - v.override[key] = val - } - v.aliases[alias] = key - } - } else { - jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key)) - } -} - -func (v *Viper) realKey(key string) string { - newkey, exists := v.aliases[key] - if exists { - jww.DEBUG.Println("Alias", key, "to", newkey) - return v.realKey(newkey) - } - return key -} - -// InConfig checks to see if the given key (or an alias) is in the config file. -func InConfig(key string) bool { return v.InConfig(key) } -func (v *Viper) InConfig(key string) bool { - // if the requested key is an alias, then return the proper key - key = v.realKey(key) - - _, exists := v.config[key] - return exists -} - -// SetDefault sets the default value for this key. -// SetDefault is case-insensitive for a key. -// Default only used when no value is provided by the user via flag, config or ENV. -func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } -func (v *Viper) SetDefault(key string, value interface{}) { - // If alias passed in, then set the proper default - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// Set sets the value for the key in the override regiser. -// Set is case-insensitive for a key. 
-// Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store. -func Set(key string, value interface{}) { v.Set(key, value) } -func (v *Viper) Set(key string, value interface{}) { - // If alias passed in, then set the proper override - key = v.realKey(strings.ToLower(key)) - value = toCaseInsensitiveValue(value) - - path := strings.Split(key, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(v.override, path[0:len(path)-1]) - - // set innermost value - deepestMap[lastKey] = value -} - -// ReadInConfig will discover and load the configuration file from disk -// and key/value stores, searching in one of the defined paths. -func ReadInConfig() error { return v.ReadInConfig() } -func (v *Viper) ReadInConfig() error { - jww.INFO.Println("Attempting to read in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - jww.DEBUG.Println("Reading file: ", filename) - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - config := make(map[string]interface{}) - - err = v.unmarshalReader(bytes.NewReader(file), config) - if err != nil { - return err - } - - v.config = config - return nil -} - -// MergeInConfig merges a new configuration with an existing config. -func MergeInConfig() error { return v.MergeInConfig() } -func (v *Viper) MergeInConfig() error { - jww.INFO.Println("Attempting to merge in config file") - filename, err := v.getConfigFile() - if err != nil { - return err - } - - if !stringInSlice(v.getConfigType(), SupportedExts) { - return UnsupportedConfigError(v.getConfigType()) - } - - file, err := afero.ReadFile(v.fs, filename) - if err != nil { - return err - } - - return v.MergeConfig(bytes.NewReader(file)) -} - -// ReadConfig will read a configuration file, setting existing keys to nil if the -// key does not exist in the file. -func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } -func (v *Viper) ReadConfig(in io.Reader) error { - v.config = make(map[string]interface{}) - return v.unmarshalReader(in, v.config) -} - -// MergeConfig merges a new configuration with an existing config. -func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } -func (v *Viper) MergeConfig(in io.Reader) error { - if v.config == nil { - v.config = make(map[string]interface{}) - } - cfg := make(map[string]interface{}) - if err := v.unmarshalReader(in, cfg); err != nil { - return err - } - mergeMaps(cfg, v.config, nil) - return nil -} - -// WriteConfig writes the current configuration to a file. -func WriteConfig() error { return v.WriteConfig() } -func (v *Viper) WriteConfig() error { - filename, err := v.getConfigFile() - if err != nil { - return err - } - return v.writeConfig(filename, true) -} - -// SafeWriteConfig writes current configuration to file only if the file does not exist. -func SafeWriteConfig() error { return v.SafeWriteConfig() } -func (v *Viper) SafeWriteConfig() error { - filename, err := v.getConfigFile() - if err != nil { - return err - } - return v.writeConfig(filename, false) -} - -// WriteConfigAs writes current configuration to a given filename. -func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } -func (v *Viper) WriteConfigAs(filename string) error { - return v.writeConfig(filename, true) -} - -// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
-func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } -func (v *Viper) SafeWriteConfigAs(filename string) error { - return v.writeConfig(filename, false) -} - -func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) } -func (v *Viper) writeConfig(filename string, force bool) error { - jww.INFO.Println("Attempting to write configuration to file.") - ext := filepath.Ext(filename) - if len(ext) <= 1 { - return fmt.Errorf("Filename: %s requires valid extension.", filename) - } - configType := ext[1:] - if !stringInSlice(configType, SupportedExts) { - return UnsupportedConfigError(configType) - } - if v.config == nil { - v.config = make(map[string]interface{}) - } - var flags int - if force == true { - flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY - } else { - if _, err := os.Stat(filename); os.IsNotExist(err) { - flags = os.O_WRONLY - } else { - return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename) - } - } - f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644)) - if err != nil { - return err - } - return v.marshalWriter(f, configType) -} - -// Unmarshal a Reader into a map. -// Should probably be an unexported function. -func unmarshalReader(in io.Reader, c map[string]interface{}) error { - return v.unmarshalReader(in, c) -} -func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - - switch strings.ToLower(v.getConfigType()) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(string(buf.Bytes())) - if err != nil { - return ConfigParseError{err} - } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) - if err != nil { - return ConfigParseError{err} - } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } - - case "properties", "props", "prop": - v.properties = properties.NewProperties() - var err error - if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range v.properties.Keys() { - value, _ := v.properties.Get(key) - // recursively build nested maps - path := strings.Split(key, ".") - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(c, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - } - - insensitiviseMap(c) - return nil -} - -// Marshal a map into Writer. 
-func marshalWriter(f afero.File, configType string) error { - return v.marshalWriter(f, configType) -} -func (v *Viper) marshalWriter(f afero.File, configType string) error { - c := v.AllSettings() - switch configType { - case "json": - b, err := json.MarshalIndent(c, "", " ") - if err != nil { - return ConfigMarshalError{err} - } - _, err = f.WriteString(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - - case "hcl": - b, err := json.Marshal(c) - ast, err := hcl.Parse(string(b)) - if err != nil { - return ConfigMarshalError{err} - } - err = printer.Fprint(f, ast.Node) - if err != nil { - return ConfigMarshalError{err} - } - - case "prop", "props", "properties": - if v.properties == nil { - v.properties = properties.NewProperties() - } - p := v.properties - for _, key := range v.AllKeys() { - _, _, err := p.Set(key, v.GetString(key)) - if err != nil { - return ConfigMarshalError{err} - } - } - _, err := p.WriteComment(f, "#", properties.UTF8) - if err != nil { - return ConfigMarshalError{err} - } - - case "toml": - t, err := toml.TreeFromMap(c) - if err != nil { - return ConfigMarshalError{err} - } - s := t.String() - if _, err := f.WriteString(s); err != nil { - return ConfigMarshalError{err} - } - - case "yaml", "yml": - b, err := yaml.Marshal(c) - if err != nil { - return ConfigMarshalError{err} - } - if _, err = f.WriteString(string(b)); err != nil { - return ConfigMarshalError{err} - } - } - return nil -} - -func keyExists(k string, m map[string]interface{}) string { - lk := strings.ToLower(k) - for mk := range m { - lmk := strings.ToLower(mk) - if lmk == lk { - return mk - } - } - return "" -} - -func castToMapStringInterface( - src map[interface{}]interface{}) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[fmt.Sprintf("%v", k)] = v - } - return tgt -} - -func castMapStringToMapInterface(src map[string]string) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { - tgt := map[string]interface{}{} - for k, v := range src { - tgt[k] = v - } - return tgt -} - -// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's -// insistence on parsing nested structures as `map[interface{}]interface{}` -// instead of using a `string` as the key for nest structures beyond one level -// deep. Both map types are supported as there is a go-yaml fork that uses -// `map[string]interface{}` instead. 
-func mergeMaps( - src, tgt map[string]interface{}, itgt map[interface{}]interface{}) { - for sk, sv := range src { - tk := keyExists(sk, tgt) - if tk == "" { - jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - tv, ok := tgt[tk] - if !ok { - jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv) - tgt[sk] = sv - if itgt != nil { - itgt[sk] = sv - } - continue - } - - svType := reflect.TypeOf(sv) - tvType := reflect.TypeOf(tv) - if svType != tvType { - jww.ERROR.Printf( - "svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - continue - } - - jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v", - sk, svType, tvType, sv, tv) - - switch ttv := tv.(type) { - case map[interface{}]interface{}: - jww.TRACE.Printf("merging maps (must convert)") - tsv := sv.(map[interface{}]interface{}) - ssv := castToMapStringInterface(tsv) - stv := castToMapStringInterface(ttv) - mergeMaps(ssv, stv, ttv) - case map[string]interface{}: - jww.TRACE.Printf("merging maps") - mergeMaps(sv.(map[string]interface{}), ttv, nil) - default: - jww.TRACE.Printf("setting value") - tgt[tk] = sv - if itgt != nil { - itgt[tk] = sv - } - } - } -} - -// ReadRemoteConfig attempts to get configuration from a remote source -// and read it in the remote configuration registry. -func ReadRemoteConfig() error { return v.ReadRemoteConfig() } -func (v *Viper) ReadRemoteConfig() error { - return v.getKeyValueConfig() -} - -func WatchRemoteConfig() error { return v.WatchRemoteConfig() } -func (v *Viper) WatchRemoteConfig() error { - return v.watchKeyValueConfig() -} - -func (v *Viper) WatchRemoteConfigOnChannel() error { - return v.watchKeyValueConfigOnChannel() -} - -func (v *Viper) insensitiviseMaps() { - insensitiviseMap(v.config) - insensitiviseMap(v.defaults) - insensitiviseMap(v.override) - insensitiviseMap(v.kvstore) -} - -// Retrieve the first found remote configuration. -func (v *Viper) getKeyValueConfig() error { - if RemoteConfig == nil { - return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") - } - - for _, rp := range v.remoteProviders { - val, err := v.getRemoteConfig(rp) - if err != nil { - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Get(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// Retrieve the first found remote configuration. -func (v *Viper) watchKeyValueConfigOnChannel() error { - for _, rp := range v.remoteProviders { - respc, _ := RemoteConfig.WatchChannel(rp) - //Todo: Add quit channel - go func(rc <-chan *RemoteResponse) { - for { - b := <-rc - reader := bytes.NewReader(b.Value) - v.unmarshalReader(reader, v.kvstore) - } - }(respc) - return nil - } - return RemoteConfigError("No Files Found") -} - -// Retrieve the first found remote configuration. 
-func (v *Viper) watchKeyValueConfig() error { - for _, rp := range v.remoteProviders { - val, err := v.watchRemoteConfig(rp) - if err != nil { - continue - } - v.kvstore = val - return nil - } - return RemoteConfigError("No Files Found") -} - -func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { - reader, err := RemoteConfig.Watch(provider) - if err != nil { - return nil, err - } - err = v.unmarshalReader(reader, v.kvstore) - return v.kvstore, err -} - -// AllKeys returns all keys holding a value, regardless of where they are set. -// Nested keys are returned with a v.keyDelim (= ".") separator -func AllKeys() []string { return v.AllKeys() } -func (v *Viper) AllKeys() []string { - m := map[string]bool{} - // add all paths, by order of descending priority to ensure correct shadowing - m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") - m = v.flattenAndMergeMap(m, v.override, "") - m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) - m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) - m = v.flattenAndMergeMap(m, v.config, "") - m = v.flattenAndMergeMap(m, v.kvstore, "") - m = v.flattenAndMergeMap(m, v.defaults, "") - - // convert set of paths to list - a := []string{} - for x := range m { - a = append(a, x) - } - return a -} - -// flattenAndMergeMap recursively flattens the given map into a map[string]bool -// of key paths (used as a set, easier to manipulate than a []string): -// - each path is merged into a single key string, delimited with v.keyDelim (= ".") -// - if a path is shadowed by an earlier value in the initial shadow map, -// it is skipped. -// The resulting set of paths is merged to the given shadow set at the same time. -func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { - if shadow != nil && prefix != "" && shadow[prefix] { - // prefix is shadowed => nothing more to flatten - return shadow - } - if shadow == nil { - shadow = make(map[string]bool) - } - - var m2 map[string]interface{} - if prefix != "" { - prefix += v.keyDelim - } - for k, val := range m { - fullKey := prefix + k - switch val.(type) { - case map[string]interface{}: - m2 = val.(map[string]interface{}) - case map[interface{}]interface{}: - m2 = cast.ToStringMap(val) - default: - // immediate value - shadow[strings.ToLower(fullKey)] = true - continue - } - // recursively merge to shadow map - shadow = v.flattenAndMergeMap(shadow, m2, fullKey) - } - return shadow -} - -// mergeFlatMap merges the given maps, excluding values of the second map -// shadowed by values from the first map. -func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { - // scan keys -outer: - for k, _ := range m { - path := strings.Split(k, v.keyDelim) - // scan intermediate paths - var parentKey string - for i := 1; i < len(path); i++ { - parentKey = strings.Join(path[0:i], v.keyDelim) - if shadow[parentKey] { - // path is shadowed, continue - continue outer - } - } - // add key - shadow[strings.ToLower(k)] = true - } - return shadow -} - -// AllSettings merges all settings and returns them as a map[string]interface{}. 
-func AllSettings() map[string]interface{} { return v.AllSettings() } -func (v *Viper) AllSettings() map[string]interface{} { - m := map[string]interface{}{} - // start from the list of keys, and construct the map one value at a time - for _, k := range v.AllKeys() { - value := v.Get(k) - if value == nil { - // should not happen, since AllKeys() returns only keys holding a value, - // check just in case anything changes - continue - } - path := strings.Split(k, v.keyDelim) - lastKey := strings.ToLower(path[len(path)-1]) - deepestMap := deepSearch(m, path[0:len(path)-1]) - // set innermost value - deepestMap[lastKey] = value - } - return m -} - -// SetFs sets the filesystem to use to read configuration. -func SetFs(fs afero.Fs) { v.SetFs(fs) } -func (v *Viper) SetFs(fs afero.Fs) { - v.fs = fs -} - -// SetConfigName sets name for the config file. -// Does not include extension. -func SetConfigName(in string) { v.SetConfigName(in) } -func (v *Viper) SetConfigName(in string) { - if in != "" { - v.configName = in - v.configFile = "" - } -} - -// SetConfigType sets the type of the configuration returned by the -// remote source, e.g. "json". -func SetConfigType(in string) { v.SetConfigType(in) } -func (v *Viper) SetConfigType(in string) { - if in != "" { - v.configType = in - } -} - -func (v *Viper) getConfigType() string { - if v.configType != "" { - return v.configType - } - - cf, err := v.getConfigFile() - if err != nil { - return "" - } - - ext := filepath.Ext(cf) - - if len(ext) > 1 { - return ext[1:] - } - - return "" -} - -func (v *Viper) getConfigFile() (string, error) { - if v.configFile == "" { - cf, err := v.findConfigFile() - if err != nil { - return "", err - } - v.configFile = cf - } - return v.configFile, nil -} - -func (v *Viper) searchInPath(in string) (filename string) { - jww.DEBUG.Println("Searching for config in ", in) - for _, ext := range SupportedExts { - jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { - jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) - return filepath.Join(in, v.configName+"."+ext) - } - } - - return "" -} - -// Search all configPaths for any config file. -// Returns the first path that exists (and is a config file). -func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) - - for _, cp := range v.configPaths { - file := v.searchInPath(cp) - if file != "" { - return file, nil - } - } - return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} -} - -// Debug prints all configuration registries for debugging -// purposes. -func Debug() { v.Debug() } -func (v *Viper) Debug() { - fmt.Printf("Aliases:\n%#v\n", v.aliases) - fmt.Printf("Override:\n%#v\n", v.override) - fmt.Printf("PFlags:\n%#v\n", v.pflags) - fmt.Printf("Env:\n%#v\n", v.env) - fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore) - fmt.Printf("Config:\n%#v\n", v.config) - fmt.Printf("Defaults:\n%#v\n", v.defaults) -}