diff --git a/installer/Gopkg.lock b/installer/Gopkg.lock index 6c41b3f4bc..6e16f83456 100644 --- a/installer/Gopkg.lock +++ b/installer/Gopkg.lock @@ -2,9 +2,13 @@ [[projects]] - branch = "master" name = "github.com/GoASTScanner/gas" - packages = [".","core","output","rules"] + packages = [ + ".", + "core", + "output", + "rules" + ] revision = "6de76c92610b387855cdfdd53c99b149928916f7" [[projects]] @@ -23,8 +27,8 @@ [[projects]] name = "github.com/Sirupsen/logrus" packages = ["."] - revision = "d26492970760ca5d33129d2d799e34be5c4782eb" - version = "v0.11.0" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" [[projects]] branch = "master" @@ -38,10 +42,17 @@ revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8" version = "v1.3.1" +[[projects]] + name = "github.com/cenkalti/backoff" + packages = ["."] + revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" + version = "v2.0.0" + [[projects]] name = "github.com/coreos/go-systemd" packages = ["daemon"] - revision = "24036eb3df68550d24a2736c5d013f4e83366866" + revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab" + version = "v17" [[projects]] name = "github.com/davecgh/go-spew" @@ -57,49 +68,63 @@ [[projects]] name = "github.com/docker/docker" - packages = ["pkg/ioutils","pkg/longpath","pkg/progress"] + packages = [ + "pkg/ioutils", + "pkg/longpath", + "pkg/progress", + "pkg/pubsub" + ] revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" version = "v1.13.1" [[projects]] name = "github.com/docker/go-units" packages = ["."] - revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52" - version = "v0.3.2" + revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" + version = "v0.3.3" [[projects]] + branch = "master" name = "github.com/dustin/go-humanize" packages = ["."] - revision = "259d2a102b871d17f30e3cd9881a642961a1e486" + revision = "02af3965c54e8cacf948b97fef38925c4120652c" [[projects]] name = "github.com/gizak/termui" packages = ["."] - revision = "798ffb9cbbe4073ef1f88e6069ca4a2c6aa6676b" + revision = "24acd523c756fd9728824cdfac66aad9d8982fb7" + version = "v2.2.0" [[projects]] - branch = "v2" name = "github.com/go-yaml/yaml" packages = ["."] - revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" [[projects]] name = "github.com/golang/dep" - packages = [".","internal/fs","internal/gps","internal/gps/internal/pb","internal/gps/paths","internal/gps/pkgtree"] + packages = [ + ".", + "internal/fs", + "internal/gps", + "internal/gps/internal/pb", + "internal/gps/paths", + "internal/gps/pkgtree" + ] revision = "8ddfc8afb2d520d41997ebddd921b52152706c01" version = "v0.3.2" [[projects]] branch = "master" name = "github.com/golang/lint" - packages = [".","golint"] - revision = "6aaf7c34af0f4c36a57e0c429bace4d706d8e931" + packages = ["golint"] + revision = "470b6b0bb3005eda157f0275e2e4895055396a81" [[projects]] branch = "master" name = "github.com/golang/protobuf" packages = ["proto"] - revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + revision = "927b65914520a8b7d44f5c9057611cfec6b2e2d0" [[projects]] name = "github.com/google/uuid" @@ -114,26 +139,28 @@ version = "v0.2.0" [[projects]] - branch = "master" name = "github.com/kr/pretty" packages = ["."] - revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4" + revision = "73f6ac0b30a98e433b289500d779f50c1a6f0712" + version = "v0.1.0" [[projects]] - branch = "master" name = "github.com/kr/text" packages = ["."] - revision = 
"7cafcd837844e784b526369c9bce262804aebc60" + revision = "e2ffdb16a802fe2bb95e2e35ff34f0e53aeef34f" + version = "v0.1.0" [[projects]] name = "github.com/maruel/panicparse" packages = ["stack"] - revision = "25bcac0d793cf4109483505a0d66e066a3a90a80" + revision = "785840568bdc7faa0dfb1cd6c643207f03271f64" + version = "v1.1.1" [[projects]] name = "github.com/mattn/go-runewidth" packages = ["."] - revision = "14207d285c6c197daabb5c9793d63e7af9ab2d50" + revision = "9e777a8366cce605130a531d2cd6363d07ad7317" + version = "v0.0.2" [[projects]] branch = "master" @@ -143,7 +170,17 @@ [[projects]] name = "github.com/nbutton23/zxcvbn-go" - packages = [".","adjacency","data","entropy","frequency","match","matching","scoring","utils/math"] + packages = [ + ".", + "adjacency", + "data", + "entropy", + "frequency", + "match", + "matching", + "scoring", + "utils/math" + ] revision = "eafdab6b0663b4b528c35975c8b0e78be6e25261" version = "v0.1" @@ -154,15 +191,16 @@ revision = "6a197d5ea61168f2ac821de2b7f011b250904900" [[projects]] + branch = "master" name = "github.com/nsf/termbox-go" packages = ["."] - revision = "91bae1bb5fa9ee504905ecbe7043fa30e92feaa3" + revision = "21a4d435a86280a2927985fd6296de56cbce453e" [[projects]] branch = "master" name = "github.com/pelletier/go-toml" packages = ["."] - revision = "4e9e0ee19b60b13eb79915933f44d8ed5f268bdd" + revision = "66540cf1fcd2c3aee6f6787dfa32a6ae9a870f12" [[projects]] name = "github.com/pkg/errors" @@ -191,75 +229,174 @@ [[projects]] name = "github.com/spf13/pflag" packages = ["."] - revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" - version = "v1.0.0" + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" [[projects]] name = "github.com/stretchr/testify" - packages = ["assert"] - revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" - version = "v1.1.4" + packages = [ + "assert", + "require" + ] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" [[projects]] - branch = "master" name = "github.com/vmware/govmomi" - packages = [".","find","govc/cli","govc/flags","govc/host/esxcli","internal","license","list","nfc","object","ovf","property","session","task","toolbox","toolbox/hgfs","toolbox/vix","vim25","vim25/debug","vim25/methods","vim25/mo","vim25/progress","vim25/soap","vim25/types","vim25/xml"] - revision = "3619c1d9adde46bd8f0c0ac02de1316caec90cdb" + packages = [ + ".", + "find", + "govc/cli", + "govc/flags", + "govc/host/esxcli", + "internal", + "license", + "list", + "nfc", + "object", + "ovf", + "performance", + "property", + "session", + "task", + "toolbox", + "toolbox/hgfs", + "toolbox/vix", + "view", + "vim25", + "vim25/debug", + "vim25/methods", + "vim25/mo", + "vim25/progress", + "vim25/soap", + "vim25/types", + "vim25/xml" + ] + revision = "123ed177021588bac57b5c87c1a84270ddf2eca8" + version = "v0.17.1" [[projects]] - branch = "master" name = "github.com/vmware/vic" - packages = ["cmd/vic-machine/common","lib/config","lib/config/dynamic","lib/config/executor","lib/constants","lib/guest","lib/install/data","lib/install/validate","lib/migration/feature","lib/spec","pkg/certificate","pkg/errors","pkg/fetcher","pkg/flags","pkg/ip","pkg/log","pkg/log/syslog","pkg/registry","pkg/trace","pkg/version","pkg/vsphere/datastore","pkg/vsphere/extraconfig","pkg/vsphere/extraconfig/vmomi","pkg/vsphere/optmanager","pkg/vsphere/session","pkg/vsphere/sys","pkg/vsphere/tags","pkg/vsphere/tasks","pkg/vsphere/test/env","pkg/vsphere/vm"] - revision = 
"a3d7a5ff74a3c34695daf848a189ea0994ca68df" + packages = [ + "cmd/vic-machine/common", + "lib/config", + "lib/config/dynamic", + "lib/config/executor", + "lib/constants", + "lib/guest", + "lib/install/data", + "lib/install/opsuser", + "lib/install/validate", + "lib/migration/feature", + "lib/spec", + "pkg/certificate", + "pkg/errors", + "pkg/fetcher", + "pkg/flags", + "pkg/ip", + "pkg/log", + "pkg/log/syslog", + "pkg/registry", + "pkg/retry", + "pkg/trace", + "pkg/version", + "pkg/vsphere/compute", + "pkg/vsphere/compute/placement", + "pkg/vsphere/datastore", + "pkg/vsphere/extraconfig", + "pkg/vsphere/extraconfig/vmomi", + "pkg/vsphere/optmanager", + "pkg/vsphere/performance", + "pkg/vsphere/rbac", + "pkg/vsphere/session", + "pkg/vsphere/sys", + "pkg/vsphere/tags", + "pkg/vsphere/tasks", + "pkg/vsphere/test/env", + "pkg/vsphere/vm" + ] + revision = "6c385b0f31264c3b18f4d773f0fe799b2396b860" + version = "v1.4.0" [[projects]] + branch = "master" name = "github.com/vmware/vmw-guestinfo" - packages = ["bdoor","message","rpcout","rpcvmx","vmcheck"] - revision = "0b8dbcdd226303bf776dcac49c2d5f3b8a8190fd" + packages = [ + "bdoor", + "message", + "rpcout", + "rpcvmx", + "vmcheck" + ] + revision = "25eff159a728be87e103a0b8045e08273f4dbec4" [[projects]] branch = "master" name = "golang.org/x/crypto" - packages = ["nacl/secretbox","poly1305","salsa20/salsa","ssh/terminal"] - revision = "bd6f299fb381e4c3393d1c4b1f0b94f5e77650c8" + packages = [ + "nacl/secretbox", + "poly1305", + "salsa20/salsa", + "ssh/terminal" + ] + revision = "1a580b3eff7814fc9b40602fd35256c63b50f491" [[projects]] + branch = "master" + name = "golang.org/x/lint" + packages = ["."] + revision = "470b6b0bb3005eda157f0275e2e4895055396a81" + +[[projects]] + branch = "master" name = "golang.org/x/net" - packages = ["context","context/ctxhttp"] - revision = "a6577fac2d73be281a500b310739095313165611" + packages = [ + "context", + "context/ctxhttp" + ] + revision = "2491c5de3490fced2f6cff376127c667efeed857" [[projects]] branch = "master" name = "golang.org/x/sync" packages = ["errgroup"] - revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5" + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" [[projects]] + branch = "master" name = "golang.org/x/sys" - packages = ["unix","windows"] - revision = "99f16d856c9836c42d24e7ab64ea72916925fa97" + packages = [ + "unix", + "windows" + ] + revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b" [[projects]] branch = "master" name = "golang.org/x/time" packages = ["rate"] - revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7" + revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" [[projects]] branch = "master" name = "golang.org/x/tools" - packages = ["go/gcexportdata","go/gcimporter15","go/types/typeutil"] - revision = "c84da9fa1c1dbcae4557e8c8509abeb814ed9632" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil" + ] + revision = "48418e5732e1b1e2a10207c8007a5f959e422f20" [[projects]] name = "gopkg.in/urfave/cli.v1" packages = ["."] - revision = "0bdeddeeb0f650497d603c4ad7b20cfe685682f6" - version = "v1.19.1" + revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1" + version = "v1.20.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "3c09d851e1b0d042e18b46ec5a8f504bf5662bb3e9b81f8c150c22df95b18c5d" + inputs-digest = "3d2c42f997954ab87dcb4e1d9e0e53bcafabec022e52b26883118ec410ce2488" solver-name = "gps-cdcl" solver-version = 1 diff --git a/installer/Gopkg.toml b/installer/Gopkg.toml index 9f5dd71259..6902b45da6 
100644 --- a/installer/Gopkg.toml +++ b/installer/Gopkg.toml @@ -22,41 +22,23 @@ required = ["github.com/golang/dep", "github.com/GoASTScanner/gas", "github.com/go-yaml/yaml", "github.com/golang/lint/golint"] -[[constraint]] - name = "github.com/Sirupsen/logrus" - version = "0.11.0" - -[[constraint]] - name = "github.com/coreos/go-systemd" - -[[constraint]] - name = "github.com/dustin/go-humanize" - -[[constraint]] - name = "github.com/gizak/termui" - -[[constraint]] - name = "github.com/gorilla/websocket" - -[[constraint]] - name = "github.com/spf13/pflag" - version = "1.0.0" - -[[constraint]] - name = "github.com/vmware/govmomi" - [[constraint]] name = "github.com/vmware/vic" - branch = "master" + version = "v1.4.0" [[constraint]] - name = "github.com/vmware/vmw-guestinfo" - -[[constraint]] - name = "gopkg.in/urfave/cli.v1" - version = "1.19.1" + name = "github.com/vmware/govmomi" + version = "v0.17.1" [[constraint]] name = "github.com/golang/dep" version = "0.3.2" +[[constraint]] + name = "github.com/GoASTScanner/gas" + revision = "6de76c92610b387855cdfdd53c99b149928916f7" + +[prune] + non-go = true + go-tests = true + unused-packages = true diff --git a/installer/Makefile b/installer/Makefile index b595525200..fcc19c605d 100644 --- a/installer/Makefile +++ b/installer/Makefile @@ -54,7 +54,7 @@ vendor: $(DEP) gas: $(GAS) @echo running go AST tool - @$(GAS) -quiet fileserver/... landing_server/... lib/... ovatools/... pkg/... tagvm/... toolbox/... 2> /dev/null + @$(GAS) -quiet fileserver/... landing_server/... lib/... ovatools/... pkg/... toolbox/... 2> /dev/null golint: $(GOLINT) @echo checking go lint... @@ -97,8 +97,6 @@ golintf = $(GOLINT) $(1) | sh -c "! grep -v 'lib/apiservers/portlayer/restapi/op $(ovfenv): $(call godeps,ovatools/ovfenv/*.go) @echo building ovfenv linux... 
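Note on the dependency changes above: Gopkg.toml now pins github.com/vmware/vic at v1.4.0 and github.com/vmware/govmomi at v0.17.1, drops several unconstrained entries, and adds a [prune] stanza, with Gopkg.lock regenerated to match. A minimal sketch of how such a lock refresh is typically produced, assuming the dep CLI is installed and run from installer/ (the commands themselves are not part of this patch):

```bash
# Minimal sketch, assuming a dep release that honors the [prune] stanza in Gopkg.toml.
cd installer
# Re-solve constraints and rewrite Gopkg.lock plus vendor/ for the bumped projects.
dep ensure -update github.com/vmware/vic github.com/vmware/govmomi
# Review how the resolved versions compare to the manifest and lock.
dep status
```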
- @echo build $(dir $<) - @echo output $@ @GOARCH=amd64 GOOS=linux $(TIME) $(GO) build $(RACE) -ldflags "$(LDFLAGS)" -o ./$@ ./$(dir $<) $(vic-ova-ui): $(call godeps,ovatools/vic-ova-ui/*.go) diff --git a/installer/build/bootable/build-base.sh b/installer/build/bootable/build-base.sh index 2acc7ab5bf..8643fe17cc 100755 --- a/installer/build/bootable/build-base.sh +++ b/installer/build/bootable/build-base.sh @@ -57,7 +57,7 @@ function set_base() { pkgconfig dbus cpio\ photon-release tdnf \ openssh linux-esx sed \ - gzip tar xz bzip2 \ + gzip zip tar xz bzip2 \ glibc iana-etc \ ca-certificates \ curl which initramfs \ diff --git a/installer/build/bootable/build-main.sh b/installer/build/bootable/build-main.sh index 4bec6f086e..293381b3c1 100755 --- a/installer/build/bootable/build-main.sh +++ b/installer/build/bootable/build-main.sh @@ -86,6 +86,11 @@ function build_app { SOURCE=$(jq '.['$LINE_NUM'] | .source' "${MANIFEST}" | tr -d '"') DESTINATION=$(echo "${ROOT}/$(cat "${MANIFEST}" | jq '.['$LINE_NUM'] | .destination')" | tr -d '"' ) mkdir -p "$(dirname "$DESTINATION")" && cp -R $SOURCE "$DESTINATION" + if [[ "$DESTINATION" == *"fileserver/html"* ]]; then + pushd $DESTINATION + rm -fr .gitignore karma* package* js/fixtures js/specs + popd + fi fi LINE_NUM=$((LINE_NUM+1)) done @@ -104,6 +109,7 @@ function build_app { TERM="$TERM" \ DEBUG="$DEBUG" \ BUILD_VICENGINE_FILE="${BUILD_VICENGINE_FILE}" \ + BUILD_VICUI_FILE="${BUILD_VICUI_FILE}" \ BUILD_HARBOR_FILE="${BUILD_HARBOR_FILE}" \ BUILD_ADMIRAL_REVISION="${BUILD_ADMIRAL_REVISION}" \ BUILD_OVA_REVISION="${BUILD_OVA_REVISION}" \ diff --git a/installer/build/build-cache.sh b/installer/build/build-cache.sh index f290a154fe..adb8248bbb 100755 --- a/installer/build/build-cache.sh +++ b/installer/build/build-cache.sh @@ -36,6 +36,7 @@ images=( downloads=( ${BUILD_HARBOR_URL} ${BUILD_VICENGINE_URL} + ${BUILD_VICUI_URL} ) function timecho { @@ -84,7 +85,7 @@ function cacheOther() { else timecho "${yarrow} downloading and saving ${brprpl}${filename}${reset}" set +e - basefile=$(ls "$(dirname "$archive")/$(echo "${filename}" | cut -f1 -d"-" | cut -f1 -d"_" | cut -f1 -d".")"* 2>/dev/null) + basefile=$(ls "$(dirname "$archive")/$(echo "${filename}" | grep -v vic | cut -f1 -d"-" | cut -f1 -d"_" | cut -f1 -d".")"* 2>/dev/null) [ $? 
-eq 0 ] && [ -f "$basefile" ] && rm "$basefile"* set -e add "${download}" "$archive" diff --git a/installer/build/build-ova.sh b/installer/build/build-ova.sh index 336e799979..fd13928116 100755 --- a/installer/build/build-ova.sh +++ b/installer/build/build-ova.sh @@ -57,6 +57,10 @@ do VICENGINE="$2" shift 2 # past argument ;; + --vicui) + VICUI="$2" + shift 2 # past argument + ;; --vicmachineserver) VIC_MACHINE_SERVER="$2" shift 2 # past argument @@ -84,6 +88,12 @@ if [ -z "${VICENGINE}" ]; then fi setenv VICENGINE "$url" +url="" +if [ -z "${VICUI}" ]; then + url=$(gsutil ls -l "gs://vic-ui-builds" | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap '' PIPE; head -1) | xargs | cut -d " " -f 3 | sed 's/gs:\/\//https:\/\/storage.googleapis.com\//') +fi +setenv VICUI "$url" + #set Harbor url="" if [ -z "${HARBOR}" ]; then @@ -104,6 +114,8 @@ export BUILD_HARBOR_FILE=${BUILD_HARBOR_FILE:-} export BUILD_HARBOR_URL=${BUILD_HARBOR_URL:-} export BUILD_VICENGINE_FILE=${BUILD_VICENGINE_FILE:-} export BUILD_VICENGINE_URL=${BUILD_VICENGINE_URL:-} +export BUILD_VICUI_FILE=${BUILD_VICUI_FILE:-} +export BUILD_VICUI_URL=${BUILD_VICUI_URL:-} export BUILD_VIC_MACHINE_SERVER_REVISION=${BUILD_VIC_MACHINE_SERVER_REVISION:-} export BUILD_ADMIRAL_REVISION=${BUILD_ADMIRAL_REVISION:-} export BUILD_OVA_REVISION=${BUILD_OVA_REVISION:-} @@ -134,6 +146,7 @@ drone deploy --param VICENGINE=${BUILD_VICENGINE_URL:-} \\ --param VIC_MACHINE_SERVER=${BUILD_VIC_MACHINE_SERVER_REVISION:-} \\ --param ADMIRAL=${BUILD_ADMIRAL_REVISION:-} \\ --param HARBOR=${BUILD_HARBOR_URL:-} \\ + --param VICUI=${BUILD_VICUI_URL:-} \\ vmware/vic-product ${DRONE_BUILD_NUMBER:-} staging EOF elif [ "deployment" == "${DRONE_BUILD_EVENT}" -a "staging" == "${DRONE_DEPLOY_TO}" ]; then @@ -144,6 +157,7 @@ drone deploy --param VICENGINE=${BUILD_VICENGINE_URL:-} \\ --param VIC_MACHINE_SERVER=${BUILD_VIC_MACHINE_SERVER_REVISION:-} \\ --param ADMIRAL=${BUILD_ADMIRAL_REVISION:-} \\ --param HARBOR=${BUILD_HARBOR_URL:-} \\ + --param VICUI=${BUILD_VICUI_URL:-} \\ vmware/vic-product ${DRONE_BUILD_NUMBER:-} release EOF fi diff --git a/installer/build/build.sh b/installer/build/build.sh index b5215d62df..1903c75a66 100755 --- a/installer/build/build.sh +++ b/installer/build/build.sh @@ -39,8 +39,8 @@ function usage() { echo -e "Usage: [--admiral|--vicmachineserver] - [--vicengine|--harbor] - [--vicengine|--harbor] + [--vicengine|--harbor|--vicui] + [--vicengine|--harbor|--vicui] [passthrough args for ./bootable/build-main.sh, eg. 
'-b bin/.vic-appliance-base.tar.gz'] ie: $0 ova-dev --harbor v1.2.0-38-ge79334a --vicengine https://storage.googleapis.com/vic-engine-builds/vic_13806.tar.gz --admiral v1.2" >&2 exit 1 diff --git a/installer/build/ova-manifest.json b/installer/build/ova-manifest.json index a3df3c4331..1871ec2a31 100644 --- a/installer/build/ova-manifest.json +++ b/installer/build/ova-manifest.json @@ -173,11 +173,6 @@ "source": "../fileserver/files/open_source_license.txt", "destination": "/opt/vmware/fileserver/files/open_source_license.txt" }, - { - "type": "file", - "source": "../fileserver/files/error_index.html", - "destination": "/opt/vmware/fileserver/index.html" - }, { "type": "file", "source": "../landing_server/html", diff --git a/installer/build/scripts/fileserver/configure_fileserver.sh b/installer/build/scripts/fileserver/configure_fileserver.sh index e40385b129..315e8ad7fa 100755 --- a/installer/build/scripts/fileserver/configure_fileserver.sh +++ b/installer/build/scripts/fileserver/configure_fileserver.sh @@ -14,59 +14,6 @@ # limitations under the License. set -uf -o pipefail -umask 077 -data_dir="/opt/vmware/fileserver" -files_dir="${data_dir}/files" -cert="/storage/data/certs/server.crt" -error_index_file="index.html" - -ca_download_dir="${data_dir}/ca_download" -mkdir -p ${ca_download_dir} - -function updateConfigFiles { - set -e - # cove cli has package in form of vic-adm_*.tar.gz, so use 'vic_*.tar.gz' here - # to avoid including cove cli - tar_gz=$(find "${data_dir}" -maxdepth 1 -name "vic_*.tar.gz") - - # untar vic package to tmp dir - tar -zxf "${tar_gz}" -C /tmp - - # get certificate thumbprint - tp=$(openssl x509 -fingerprint -noout -in "${cert}" | awk -F= '{print $2}') - - # replace configs files - lconfig=/tmp/vic/ui/VCSA/configs - wconfig=/tmp/vic/ui/vCenterForWindows/configs - - cur_tp_l=$(awk '/VIC_UI_HOST_THUMBPRINT=/{print $NF}' $lconfig) - sed -i -e s/${cur_tp_l}/VIC_UI_HOST_THUMBPRINT=\"${tp}\"/g $lconfig - - cur_tp_w=$(awk '/vic_ui_host_thumbprint=/{print $NF}' $wconfig) - sed -i -e s/${cur_tp_w}/vic_ui_host_thumbprint=${tp}/g $wconfig - - file_server="https://${HOSTNAME}:${FILESERVER_PORT}" - cur_file_server_l=$(awk '/VIC_UI_HOST_URL=/{print $NF}' $lconfig) - sed -i -e s%${cur_file_server_l}%VIC_UI_HOST_URL=\"${file_server}\"%g $lconfig - - cur_file_server_w=$(awk '/vic_ui_host_url=/{print $NF}' $wconfig) - sed -i -e s%${cur_file_server_w}%vic_ui_host_url=${file_server}%g $wconfig - - # tar all files again - tar zcf "$files_dir/$(basename $tar_gz)" -C /tmp vic - rm -rf /tmp/vic -} +mkdir -p "/opt/vmware/fileserver/ca_download" iptables -w -A INPUT -j ACCEPT -p tcp --dport "${FILESERVER_PORT}" - -# Update configurations, run in subshell to preserve +e -( updateConfigFiles ) -if [ $? -eq 0 ]; then - echo "Fileserver configuration complete." - if [ -f "${error_index_file}" ]; then - rm "${files_dir}/${error_index_file}" - fi -else - echo "Fileserver configuration failed." - cp "${data_dir}/${error_index_file}" "${files_dir}/${error_index_file}" -fi diff --git a/installer/build/scripts/provisioners/provision_fileserver.sh b/installer/build/scripts/provisioners/provision_fileserver.sh index 07dcad543a..3bda2c7eba 100644 --- a/installer/build/scripts/provisioners/provision_fileserver.sh +++ b/installer/build/scripts/provisioners/provision_fileserver.sh @@ -1,5 +1,5 @@ #!/usr/bin/bash -# Copyright 2017 VMware, Inc. All Rights Reserved. +# Copyright 2018 VMware, Inc. All Rights Reserved. 
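The usage text above now lists --vicui alongside --vicengine and --harbor. A hedged invocation sketch follows; the vic-ui tarball URL is a placeholder, not a real build artifact:

```bash
# Placeholder artifact URLs; substitute real builds. Assumed to be run from installer/.
./build/build.sh ova-dev \
  --vicengine https://storage.googleapis.com/vic-engine-builds/vic_13806.tar.gz \
  --vicui https://storage.googleapis.com/vic-ui-builds/vic_4000.tar.gz \
  --harbor v1.2.0-38-ge79334a \
  --admiral v1.2
# When --vicui is omitted, build-ova.sh picks the newest vic_*.tar.gz in gs://vic-ui-builds.
```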
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,17 +19,52 @@ FILES_DIR="${DATA_DIR}/files" mkdir -p /etc/vmware/fileserver # Fileserver config scripts mkdir -p ${FILES_DIR} # Files to serve -mkdir -p ${DATA_DIR} # Backup of the original vic tar - -cd /var/tmp echo "Provisioning VIC Engine ${BUILD_VICENGINE_FILE}" -cp /etc/cache/${BUILD_VICENGINE_FILE} . +cp "/etc/cache/${BUILD_VICENGINE_FILE}" ${FILES_DIR} + +echo "Provisioning VIC UI ${BUILD_VICUI_FILE}" + +TMP_FOLDER=/tmp/vic-ui +mkdir -p ${TMP_FOLDER} +cd ${TMP_FOLDER} +tar -xzf /etc/cache/${BUILD_VICUI_FILE} -C "${TMP_FOLDER}" # creates ${TMP_FOLDER}/bin/ui/.... + +# get version strings +VIC_BIN_ROOT="${TMP_FOLDER}/bin/" +FULL_VER_STRING=$(echo "${BUILD_OVA_REVISION}" | sed -e 's/\-rc[[:digit:]]//g') +MAJOR_MINOR_PATCH=$(echo $FULL_VER_STRING | awk -F- '{print $1}' | cut -c 2-) +BUILD_NUMBER=$(echo $FULL_VER_STRING | awk -F- '{print $2}') +VIC_ENGINE_VER_STRING=${MAJOR_MINOR_PATCH}.${BUILD_NUMBER} +VIC_UI_VER_STRING=$(ls -l ${VIC_BIN_ROOT}ui/plugin-packages | grep '^d' | head -1 | awk '{print $9}' | awk -F- '{print $2}') + +# update plugin-package.xml for H5 Client plugin +echo "Updating description for H5 Client plugin to \"vSphere Client Plugin for vSphere Integrated Containers Engine (v${VIC_ENGINE_VER_STRING})"\" +cd ${VIC_BIN_ROOT}ui/plugin-packages/com.vmware.vic-${VIC_UI_VER_STRING} +sed -i "s/H5 Client Plugin for vSphere Integrated Containers Engine/vSphere Client Plugin for vSphere Integrated Containers Engine \(v${VIC_ENGINE_VER_STRING}\)/" plugin-package.xml +zip -9 -r ${VIC_BIN_ROOT}ui/plugin-packages/com.vmware.vic-${VIC_UI_VER_STRING}.zip ./* +cd ${TMP_FOLDER} + +# update plugin-package.xml for Flex Client plugin +echo "Updating description for Flex Client plugin to \"vSphere Client Plugin for vSphere Integrated Containers Engine (v${VIC_ENGINE_VER_STRING})\"" +cd ${VIC_BIN_ROOT}ui/vsphere-client-serenity/com.vmware.vic.ui-${VIC_UI_VER_STRING} +sed -i "s/Flex Client Plugin for vSphere Integrated Containers Engine/vSphere Client Plugin for vSphere Integrated Containers Engine \(v${VIC_ENGINE_VER_STRING}\)/" plugin-package.xml +zip -9 -r ${VIC_BIN_ROOT}ui/vsphere-client-serenity/com.vmware.vic.ui-${VIC_UI_VER_STRING}.zip ./* +cd ${TMP_FOLDER} + +# update plugin-manifest +sed -i "s/summary=.*/summary=\"vSphere Client Plugin for vSphere Integrated Containers Engine (v${VIC_ENGINE_VER_STRING})\"/" ${VIC_BIN_ROOT}ui/plugin-manifest + +echo "version from the vic-ui repo is: ${VIC_UI_VER_STRING}" +echo "version from vic-engine is: ${VIC_ENGINE_VER_STRING}" + +tar -czf ${FILES_DIR}/${BUILD_VICUI_FILE} -C ${TMP_FOLDER} . +find . 
-iname "*.zip" -exec cp {} ${FILES_DIR} \; -# Copy UI plugin zip files to fileserver directory -tar tf "${BUILD_VICENGINE_FILE}" | grep "vic/ui" | grep ".zip" | xargs -I '{}' tar xzf "${BUILD_VICENGINE_FILE}" -C ${FILES_DIR} '{}' --strip-components=3 +# clean up scratch folders +rm -rf ${TMP_FOLDER} -mv "${BUILD_VICENGINE_FILE}" ${DATA_DIR} +ls -l ${FILES_DIR} # Write version files echo "engine=${BUILD_VICENGINE_FILE}" >> /data/version diff --git a/installer/build/scripts/upgrade/upgrade.sh b/installer/build/scripts/upgrade/upgrade.sh index 94a0393aa2..0ca18f3f71 100755 --- a/installer/build/scripts/upgrade/upgrade.sh +++ b/installer/build/scripts/upgrade/upgrade.sh @@ -46,6 +46,7 @@ DESTROY_ENABLED="" MANUAL_DISK_MOVE="" EMBEDDED_PSC="" INSECURE_SKIP_VERIFY="" +UPGRADE_UI_PLUGIN="" TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S %z %Z") export REDIRECT_ENABLED=0 @@ -86,22 +87,82 @@ function usage { [--embedded-psc]: Using embedded PSC. Do not prompt for external PSC options. [--ssh-insecure-skip-verify]: Skip host key checking when SSHing to the old appliance. + [--upgrade-ui-plugin]: Upgrade ui plugin. " } +# A plugin upgrade is a forced plugin install +function callPluginUpgradeEndpoint { + local preset=$1 + local vc='{"target":"'"${VCENTER_TARGET}"'","user":"'"${VCENTER_USERNAME}"'","password":"'"${VCENTER_PASSWORD}"'","thumbprint":"'"${VCENTER_FINGERPRINT}"'"}' + local vc_info='{"target":"'"${VCENTER_TARGET}"'","user":"'"${VCENTER_USERNAME}"'","thumbprint":"'"${VCENTER_FINGERPRINT}"'"}' + local plugin='{"preset":"'"${preset}"'","force":true}' + local payload='{"vc":"${vc}","plugin":"${plugin}"}' + local payload_info='{"vc":"${vc_info}","plugin":"${plugin}"}' + echo "register payload - ${payload_info}" >> $upgrade_log_file 2>&1 + /usr/bin/curl \ + -k \ + -s \ + -o /dev/null \ + --write-out "%{http_code}\\n" \ + --header "Content-Type: application/json" \ + -X POST \ + --data "${payload}" \ + https://localhost:9443/plugin/upgrade +} + +function upgradeAppliancePlugin { + # Upgrade the flex client... + tab_retries=0 + max_tab_retries=30 # 5 minutes + + ret=$(callPluginUpgradeEndpoint FLEX) + while [[ "$ret" != *"204"* && "$ret" != *"5"* && ${tab_retries} -lt ${max_tab_retries} ]]; do + log "Waiting for upgrade appliance flex plugin..." + sleep 10 + let "tab_retries+=1" + ret=$(callPluginUpgradeEndpoint FLEX) + done + + if [[ ${tab_retries} -eq ${max_tab_retries} || "$ret" == *"5"* ]]; then + log "WARNING: Plugin upgrade failed for the FLEX client. This is expected on vCenter versions 6.7 or higher." + log "WARNING: If you expected this to pass on older versions of vSphere, please check your credentials and try again, or contact VMware Support." + fi + + # Upgrade the H5 client... + tab_retries=0 + ret=$(callPluginUpgradeEndpoint H5) + while [[ "$ret" != *"204"* && "$ret" != *"5"* && ${tab_retries} -lt ${max_tab_retries} ]]; do + log "Waiting for upgrade appliance h5 plugin..." + sleep 10 + let "tab_retries+=1" + ret=$(callPluginUpgradeEndpoint H5) + done + + if [[ ${tab_retries} -eq ${max_tab_retries} || "$ret" == *"5"* ]]; then + log "Failed to upgrade appliance h5 plugin. Check vCenter target settings, or contact VMware support." 
+ exit 1 + fi +} + function callRegisterEndpoint { + local payload='{"target":"'"${VCENTER_TARGET}"'","user":"'"${VCENTER_USERNAME}"'","password":"'"${VCENTER_PASSWORD}"'","thumbprint":"'"${VCENTER_FINGERPRINT}"'","externalpsc":"'"${EXTERNAL_PSC}"'","pscdomain":"'"${PSC_DOMAIN}"'"}' + local payload_info='{"target":"'"${VCENTER_TARGET}"'","user":"'"${VCENTER_USERNAME}"'","thumbprint":"'"${VCENTER_FINGERPRINT}"'","externalpsc":"'"${EXTERNAL_PSC}"'","pscdomain":"'"${PSC_DOMAIN}"'"}' + echo "register payload - ${payload_info}" >> $upgrade_log_file 2>&1 /usr/bin/curl \ -k \ - --write-out '%{http_code}' \ + -s \ + -o /dev/null \ + --write-out "%{http_code}\\n" \ --header "Content-Type: application/json" \ -X POST \ - --data '{"target":"'"${VCENTER_TARGET}"'","user":"'"${VCENTER_USERNAME}"'","password":"'"${VCENTER_PASSWORD}"'","externalpsc":"'"${EXTERNAL_PSC}"'","pscdomain":"'"${PSC_DOMAIN}"'"}' \ + --data "${payload}" \ https://localhost:9443/register } # Register appliance for content trust function registerAppliance { - + log "Registering the appliance in PSC" tab_retries=0 max_tab_retries=30 # 5 minutes while [[ "$(callRegisterEndpoint)" != *"200"* && ${tab_retries} -lt ${max_tab_retries} ]]; do @@ -114,7 +175,6 @@ function registerAppliance { log "Failed to register appliance. Check vCenter target and credentials and provided PSC settings." exit 1 fi - } # Get PSC tokens for SSO integration @@ -489,6 +549,9 @@ function main { --ssh-insecure-skip-verify) INSECURE_SKIP_VERIFY="1" ;; + --upgrade-ui-plugin) + UPGRADE_UI_PLUGIN="y" + ;; -h|--help|*) usage exit 0 @@ -522,6 +585,7 @@ function main { echo "TLS connection is not secure, unable to proceed with upgrade. Please contact VMware support. Exiting..." exit 1 fi + export VCENTER_FINGERPRINT="$(echo "${fingerprint}" | awk '{print $2}')" echo "${fingerprint}" > $GOVC_TLS_KNOWN_HOSTS else log "Using provided vCenter fingerprint from --fingerprint ${VCENTER_FINGERPRINT}" @@ -532,10 +596,11 @@ function main { export GOVC_DATACENTER="$VCENTER_DATACENTER" [ -z "${APPLIANCE_TARGET}" ] && read -p "Enter old VIC appliance IP: " APPLIANCE_TARGET [ -z "${APPLIANCE_USERNAME}" ] && read -p "Enter old VIC appliance username: " APPLIANCE_USERNAME + [ -z "${UPGRADE_UI_PLUGIN}" ] && read -p "Upgrade VIC UI Plugin? (y/n): " UPGRADE_UI_PLUGIN if [ -n "${DESTROY_ENABLED}" ] ; then local resp="" - read -p "Destroy option enabled. This will delete the old VIC appliance after upgrade. Are you sure? (y/n):" resp + read -p "Destroy option enabled. This will delete the old VIC appliance after upgrade. Are you sure? (y/n): " resp if [ "$resp" != "y" ]; then echo "Exiting..." exit 1 @@ -576,6 +641,11 @@ function main { ### -------------------- ### ### Component Upgrades ### ### -------------------- ### + if [ "$UPGRADE_UI_PLUGIN" == "y" ]; then + log "\n-------------------------\nStarting VIC UI Plugin Upgrade ${TIMESTAMP}\n" + upgradeAppliancePlugin + fi + log "\n-------------------------\nStarting Admiral Upgrade ${TIMESTAMP}\n" upgradeAdmiral log "\n-------------------------\nStarting Harbor Upgrade ${TIMESTAMP}\n" @@ -605,7 +675,11 @@ function finish() { if [ "$rc" -eq 0 ]; then log "" log "-------------------------" - log "Upgrade completed successfully. Exiting." + if [ "$UPGRADE_UI_PLUGIN" == "y" ]; then + log "Upgrade completed successfully. Exiting. All vSphere Client users must log out and log back in again twice to see the vSphere Integrated Containers plug-in." + else + log "Upgrade completed successfully. Exiting." 
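The upgrade flow above also exports VCENTER_FINGERPRINT from the second field of the govc known-hosts entry. A tiny worked example of that extraction; the host name and thumbprint below are made up:

```bash
# Hypothetical known-hosts style entry: "<host> <thumbprint>".
fingerprint='vcenter.example.com 69:A1:56:73:A4:B2:78:42:56:69:A1:56:73:A4:B2:78:42:56:69:A1'
VCENTER_FINGERPRINT="$(echo "${fingerprint}" | awk '{print $2}')"
echo "${VCENTER_FINGERPRINT}"   # prints only the colon-separated thumbprint
```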
+ fi log "-------------------------" log "" else diff --git a/installer/fileserver/files/error_index.html b/installer/fileserver/files/error_index.html deleted file mode 100644 index 63fda84ed5..0000000000 --- a/installer/fileserver/files/error_index.html +++ /dev/null @@ -1,12 +0,0 @@ - -

-Error starting VIC Appliance Fileserver.
-The VIC Appliance Fileserver failed to configure the VIC archive.
-It may contain incorrect values required to install the VIC UI plugin.
-In order to correct this error, you must do one of the following:
-  • Restart the guest OS.
-  • SSH to the VIC appliance and execute: systemctl restart fileserver
- \ No newline at end of file diff --git a/installer/fileserver/html/.gitignore b/installer/fileserver/html/.gitignore new file mode 100644 index 0000000000..73c33901e7 --- /dev/null +++ b/installer/fileserver/html/.gitignore @@ -0,0 +1,41 @@ +# Created by https://www.gitignore.io/api/node + +### Node ### +# Logs +logs +*.log +npm-debug.log* + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage + +# nyc test coverage +.nyc_output + +# Compiled binary addons (http://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Output of 'npm pack' +*.tgz + +# dotenv environment variables file +.env \ No newline at end of file diff --git a/installer/fileserver/html/index.html b/installer/fileserver/html/index.html index 9b9866a08c..39d48fea3f 100644 --- a/installer/fileserver/html/index.html +++ b/installer/fileserver/html/index.html @@ -18,7 +18,6 @@ - @@ -30,35 +29,34 @@
[installer/fileserver/html/index.html hunks, summarized: the {{ .InitErrorFeedback }} and {{ .InitSuccessFeedback }} alert blocks are re-indented and the "Re-initialize" link is removed; the "Getting Started" card is replaced by a "vSphere Integrated Containers Appliance" card with a "Documentation" link; markup inside the {{if .NeedLogin}} block under "Infrastructure Deployment Tools" is reworked. Full HTML not reproduced.]
diff --git a/installer/fileserver/html/js/fixtures/index.html b/installer/fileserver/html/js/fixtures/index.html new file mode 100755 index 0000000000..e1934c69c2 --- /dev/null +++ b/installer/fileserver/html/js/fixtures/index.html @@ -0,0 +1,96 @@
[96 added lines of Jasmine HTML fixture markup for the login/plugin modals, not reproduced.]
diff --git a/installer/fileserver/html/js/index-handler.js b/installer/fileserver/html/js/index-handler.js new file mode 100644 index 0000000000..29cbd9c2bd --- /dev/null +++ b/installer/fileserver/html/js/index-handler.js @@ -0,0 +1,82 @@ +function getThumbprint(target,callback) { + var xhr = new XMLHttpRequest(); + var thumbprint; + xhr.open('POST', '/thumbprint?target=' + target); + xhr.onreadystatechange = function() { + callback(xhr.response,xhr.status); + }; + xhr.send(); +} + +function checkRegistryForm() { + var cansubmit = true; + + if (document.getElementById('target').value.length == 0 || document.getElementById('user').value.length == 0 || document.getElementById('password').value.length == 0){ + cansubmit = false; + } + + document.getElementById('login-submit').disabled = !cansubmit; +} +function checkThumbrintInput() { + var cansubmit = true; + + if (document.getElementById('thumbprint-show').value.length == 0){ + cansubmit = false; + } + + document.getElementById('plugin-submit').disabled = !cansubmit; +} + +function submitRegistration() { +//login elements + +$loginForm = document.getElementById('login-form'); +$loginSpinner = document.getElementById('login-spinner'); +$loginBody = document.getElementById('login-body'); +$loginSubmit = document.getElementById('login-submit'); +$loginModal = document.getElementById('login-modal'); +//plugin installations elements + +$pluginForm = document.getElementById('plugin-form'); +$pluginSpinner = document.getElementById('plugin-spinner'); +$pluginBody = document.getElementById('plugin-body'); +$pluginSubmit = document.getElementById('plugin-submit'); +$pluginModal = document.getElementById('plugin-modal'); + + if ($loginForm) { + event.preventDefault(); + $loginSubmit.setAttribute('disabled', 'disabled'); + $loginBody.style.display = 'none'; + $loginSpinner.style.display = ''; + $vc = document.getElementById('target').value; + getThumbprint($vc,function (thumbprint,status) { + if (status === 200){ + $loginModal.style.display = 'none'; + $pluginModal.style.display = ''; + checkThumbrintInput(); + document.getElementById('thumbprint').value = thumbprint; + document.getElementById('thumbprint-show').value = thumbprint; + } + else{ + $loginBody.style.display = ''; + $loginSpinner.style.display = 'none'; + $loginSubmit.removeAttribute("disabled"); + document.getElementById('thumbprint-alert-span').textContent = 'code: '+status+' '+thumbprint+', '+'check VC IP/FQDN'; + document.getElementById('thumbprint-alert-div').style.display=''; + } + }) + + } + + if ($pluginForm) { + $pluginForm.addEventListener('submit', function(event) { + event.preventDefault(); + $pluginSubmit.setAttribute('disabled', 'disabled'); + $pluginBody.style.display = 'none'; + $pluginSpinner.style.display = ''; + setTimeout(function() { + $loginForm.submit(); + },2000); + }) + } +} diff --git a/installer/fileserver/html/js/specs/index-handler-spec.js b/installer/fileserver/html/js/specs/index-handler-spec.js new file mode 100755 index 0000000000..27513223e2 --- /dev/null +++ b/installer/fileserver/html/js/specs/index-handler-spec.js @@ -0,0 +1,126 @@ + +describe("login/registration modal behavior", function() { + //responses for getThumbprint + + var responses = { + success: { + status: 200, + response: "69:A1:56:73:A4:B2:78:42:56:69:A1:56:73:A4:B2:78:42:56:69:A1:56:73:A4:B2:78:42:56", + }, + failure: { + status: 500, + response: "Invalid host", + } + }; + + //vc credentials + var credentials = { + target: "1.2.3.4", + user: "admin", + password: "adminuser", + 
thumbprint: "" + } + + var request; + + //defining fixtures path + beforeAll(function(){ + var path = ''; + if (typeof window.__karma__ !== 'undefined') { + path += 'base/' + } + jasmine.getFixtures().fixturesPath = path + 'js/fixtures'; + preloadFixtures('index.html'); + + }); + + beforeEach(function() { + loadFixtures( 'index.html'); + jasmine.Ajax.install(); + }); + + afterEach(function() { + jasmine.Ajax.uninstall(); + }); + + it ("should be present only the registration modal with their elements", function() { + + //Registration modal is still present + expect(jQuery('#login-modal')).toBeVisible(); + + //Registration form and their elements are present + expect(jQuery('#login-form')).toBeVisible(); + expect(jQuery('#target')).toBeVisible(); + expect(jQuery('#user')).toBeVisible(); + expect(jQuery('input[name=password]')).toBeVisible(); + expect(jQuery('#login-submit')).toBeVisible(); + }); + + it ("Should has disabled the continue button when the fields were empty", function(){ + + jQuery('#target').val(credentials.target); + checkRegistryForm(); + expect(jQuery('#login-submit').prop("disabled")).toBe(true); + + jQuery('#user').val(credentials.user); + checkRegistryForm(); + expect(jQuery('#login-submit').prop("disabled")).toBe(true); + + jQuery('#password').val(credentials.password); + checkRegistryForm(); + expect(jQuery('#login-submit').prop("disabled")).toBe(false); + }); + + it ("should has disabled the continue button when thumbprint input is empty", function(done) { + + jQuery('#login-modal').css('display', 'none'); + jQuery('#plugin-modal').css('display', ''); + + jQuery('#thumbprint-show').val(""); + checkThumbrintInput(); + expect(jQuery('#plugin-submit').prop("disabled")).toBe(true); + + jQuery('#thumbprint-show').val(responses.success.response); + checkThumbrintInput(); + expect(jQuery('#plugin-submit').prop("disabled")).toBe(false); + done(); + }); + + it ("should retrieve thumbprint when click submit button and hide registration modal", function(done) { + var spyEvent = spyOnEvent('#login-submit', 'click'); + jQuery('#target').val(credentials.target); + jQuery('#password').val(credentials.password); + + //Sending thumbprint request + jQuery('#login-submit').click(submitRegistration()); + jQuery('#login-submit').click(); + request = jasmine.Ajax.requests.mostRecent(); + request.respondWith(responses.success); + + //The url of the request must have the target + expect(request.url).toBe('/thumbprint?target='+credentials.target); + //Thumbprint retrieve + expect(jQuery('#thumbprint-show')).toHaveValue(responses.success.response); + //Registration modal hide + expect(jQuery('#login-modal')).toBeHidden(); + done(); + }); + + it ("should remains registration modal when click submit button and thumbrint retrieval fails.", function(done) { + var spyEvent = spyOnEvent('#login-submit', 'click'); + jQuery('#login-submit').click(submitRegistration()); + jQuery('#login-submit').click(); + + request = jasmine.Ajax.requests.mostRecent(); + request.respondWith(responses.failure); + + //Registration modal is still present + expect(jQuery('#login-modal')).toBeVisible(); + //Plugin modal is still hidden + expect(jQuery('#plugin-modal')).toBeHidden(); + //Show thumbprint retrieve failed + expect(jQuery('#thumbprint-alert-div')).toBeVisible(); + done(); + }); + }); + \ No newline at end of file diff --git a/installer/fileserver/html/karma.conf.js b/installer/fileserver/html/karma.conf.js new file mode 100644 index 0000000000..18a3f020c5 --- /dev/null +++ 
b/installer/fileserver/html/karma.conf.js @@ -0,0 +1,77 @@ +module.exports = function(config) { + config.set({ + + // base path that will be used to resolve all patterns (eg. files, exclude) + basePath: '', + + + // frameworks to use + // available frameworks: https://npmjs.org/browse/keyword/karma-adapter + frameworks: ['jasmine-ajax','jasmine-jquery','jasmine'], + + + // list of files / patterns to load in the browser + files: [ + {pattern: 'js/fixtures/*.html', watched: true, included: false, served: true}, + // code i want to test + 'js/index-handler.js', + // specs + 'js/specs/index-handler-spec.js' + ], + + + // list of files / patterns to exclude + exclude: [ + ], + + + // preprocess matching files before serving them to the browser + // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor + preprocessors: { + }, + + + // test results reporter to use + // possible values: 'dots', 'progress' + // available reporters: https://npmjs.org/browse/keyword/karma-reporter + reporters: ['progress'], + + + // web server port + port: 9876, + + + // enable / disable colors in the output (reporters and logs) + colors: true, + + + // level of logging + // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG + logLevel: config.LOG_DEBUG, + + + // enable / disable watching file and executing tests whenever any file changes + autoWatch: true, + + + // start these browsers + // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher + browsers: ['PhantomJS'], + + plugins: [ + 'karma-phantomjs-launcher', + 'karma-jasmine', + 'karma-jasmine-jquery', + 'karma-jasmine-ajax' + ], + + + // Continuous Integration mode + // if true, Karma captures browsers, runs the tests and exits + singleRun: true, + + // Concurrency level + // how many browser should be started simultaneous + concurrency: Infinity + }) +} diff --git a/installer/fileserver/html/package-lock.json b/installer/fileserver/html/package-lock.json new file mode 100644 index 0000000000..059143663d --- /dev/null +++ b/installer/fileserver/html/package-lock.json @@ -0,0 +1,5900 @@ +{ + "name": "vic-uis-auto-installer", + "version": "1.0.0", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "abbrev": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-1.0.9.tgz", + "integrity": "sha1-kbR5JYinc4wl813W9jdSovh3YTU=", + "dev": true + }, + "accepts": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz", + "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=", + "dev": true, + "requires": { + "mime-types": "2.1.18", + "negotiator": "0.6.1" + } + }, + "addressparser": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/addressparser/-/addressparser-1.0.1.tgz", + "integrity": "sha1-R6++GiqSYhkdtoOOT9HTm0CCF0Y=", + "dev": true, + "optional": true + }, + "after": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz", + "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8=", + "dev": true + }, + "agent-base": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-4.2.0.tgz", + "integrity": "sha512-c+R/U5X+2zz2+UCrCFv6odQzJdoqI+YecuhnAJLa1zYaMc13zPfwMwZrr91Pd1DYNo/yPRbiM4WVf9whgwFsIg==", + "dev": true, + "requires": { + "es6-promisify": "5.0.0" + } + }, + "ajv": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", + "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=", 
+ "dev": true, + "requires": { + "co": "4.6.0", + "fast-deep-equal": "1.1.0", + "fast-json-stable-stringify": "2.0.0", + "json-schema-traverse": "0.3.1" + } + }, + "amdefine": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/amdefine/-/amdefine-1.0.1.tgz", + "integrity": "sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=", + "dev": true, + "optional": true + }, + "amqplib": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/amqplib/-/amqplib-0.5.2.tgz", + "integrity": "sha512-l9mCs6LbydtHqRniRwYkKdqxVa6XMz3Vw1fh+2gJaaVgTM6Jk3o8RccAKWKtlhT1US5sWrFh+KKxsVUALURSIA==", + "dev": true, + "optional": true, + "requires": { + "bitsyntax": "0.0.4", + "bluebird": "3.5.1", + "buffer-more-ints": "0.0.2", + "readable-stream": "1.1.14", + "safe-buffer": "5.1.2" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true, + "optional": true + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "optional": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true, + "optional": true + } + } + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", + "dev": true + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", + "dev": true, + "optional": true + }, + "anymatch": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", + "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "dev": true, + "requires": { + "micromatch": "2.3.11", + "normalize-path": "2.1.1" + } + }, + "archy": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/archy/-/archy-0.0.2.tgz", + "integrity": "sha1-kQ9Dv2YUH8M1VkWXq8GJ30Sz014=", + "dev": true + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dev": true, + "requires": { + "sprintf-js": "1.0.3" + } + }, + "arr-diff": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", + "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", + "dev": true, + "requires": { + "arr-flatten": "1.1.0" + } + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==", + "dev": true + }, + "array-filter": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/array-filter/-/array-filter-0.0.1.tgz", + "integrity": "sha1-fajPLiZijtcygDWB/SH2fKzS7uw=", + "dev": true + }, + "array-map": { + "version": "0.0.0", + "resolved": 
"https://registry.npmjs.org/array-map/-/array-map-0.0.0.tgz", + "integrity": "sha1-iKK6tz0c97zVwbEYoAP2b2ZfpmI=", + "dev": true + }, + "array-reduce": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/array-reduce/-/array-reduce-0.0.0.tgz", + "integrity": "sha1-FziZ0//Rx9k4PkR5Ul2+J4yrXys=", + "dev": true + }, + "array-slice": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/array-slice/-/array-slice-0.2.3.tgz", + "integrity": "sha1-3Tz7gO15c6dRF82sabC5nshhhvU=", + "dev": true + }, + "array-unique": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", + "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=", + "dev": true + }, + "arraybuffer.slice": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz", + "integrity": "sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==", + "dev": true + }, + "asn1": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.3.tgz", + "integrity": "sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y=", + "dev": true + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", + "dev": true + }, + "ast-types": { + "version": "0.11.5", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.11.5.tgz", + "integrity": "sha512-oJjo+5e7/vEc2FBK8gUalV0pba4L3VdBIs2EKhOLHLcOd2FgQIVQN9xb0eZ9IjEWyAL7vq6fGJxOvVvdCHNyMw==", + "dev": true, + "optional": true + }, + "async": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.1.tgz", + "integrity": "sha512-fNEiL2+AZt6AlAw/29Cr0UDe4sRAHCpEHh54WMz+Bb7QfNcFw4h3loofyJpLeQs4Yx7yuqu/2dLgM5hKOs6HlQ==", + "dev": true, + "optional": true, + "requires": { + "lodash": "4.17.10" + } + }, + "async-each": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.1.tgz", + "integrity": "sha1-GdOGodntxufByF04iu28xW0zYC0=", + "dev": true + }, + "async-limiter": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz", + "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==", + "dev": true + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", + "dev": true + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", + "dev": true + }, + "aws4": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.7.0.tgz", + "integrity": "sha512-32NDda82rhwD9/JBCCkB+MRYDp0oSvlo2IL6rQWA10PQi7tDUM3eqMSltXmY+Oyl/7N3P3qNtAlv7X0d9bI28w==", + "dev": true + }, + "axios": { + "version": "0.15.3", + "resolved": "https://registry.npmjs.org/axios/-/axios-0.15.3.tgz", + "integrity": "sha1-LJ1jiy4ZGgjqHWzJiOrda6W9wFM=", + "dev": true, + "optional": true, + "requires": { + "follow-redirects": "1.0.0" + }, + "dependencies": { + "follow-redirects": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.0.0.tgz", + "integrity": "sha1-jjQpjL0uF28lTv/sdaHHjMhJ/Tc=", + "dev": true, + "optional": true, + "requires": { + "debug": "2.6.9" + } + } + } + }, + "backo2": { + "version": "1.0.2", + 
"resolved": "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz", + "integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc=", + "dev": true + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", + "dev": true + }, + "base64-arraybuffer": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz", + "integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg=", + "dev": true + }, + "base64id": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz", + "integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY=", + "dev": true + }, + "bcrypt-pbkdf": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz", + "integrity": "sha1-Y7xdy2EzG5K8Bf1SiVPDNGKgb40=", + "dev": true, + "optional": true, + "requires": { + "tweetnacl": "0.14.5" + } + }, + "better-assert": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz", + "integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=", + "dev": true, + "requires": { + "callsite": "1.0.0" + } + }, + "binary": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/binary/-/binary-0.3.0.tgz", + "integrity": "sha1-n2BVO8XOjDOG87VTz/R0Yq3sqnk=", + "dev": true, + "requires": { + "buffers": "0.1.1", + "chainsaw": "0.1.0" + } + }, + "binary-extensions": { + "version": "1.11.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.11.0.tgz", + "integrity": "sha1-RqoXUftqL5PuXmibsQh9SxTGwgU=", + "dev": true + }, + "bitsyntax": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/bitsyntax/-/bitsyntax-0.0.4.tgz", + "integrity": "sha1-6xDMb4K4xJDj6FaY8H6D1G4MuoI=", + "dev": true, + "optional": true, + "requires": { + "buffer-more-ints": "0.0.2" + } + }, + "bl": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/bl/-/bl-1.1.2.tgz", + "integrity": "sha1-/cqHGplxOqANGeO7ukHER4emU5g=", + "dev": true, + "optional": true, + "requires": { + "readable-stream": "2.0.6" + }, + "dependencies": { + "process-nextick-args": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz", + "integrity": "sha1-FQ4gt1ZZCtP5EJPyWk8q2L/zC6M=", + "dev": true, + "optional": true + }, + "readable-stream": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.0.6.tgz", + "integrity": "sha1-j5A0HmilPMySh4jaz80Rs265t44=", + "dev": true, + "optional": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "1.0.7", + "string_decoder": "0.10.31", + "util-deprecate": "1.0.2" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true, + "optional": true + } + } + }, + "blob": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/blob/-/blob-0.0.4.tgz", + "integrity": "sha1-vPEwUspURj8w+fx+lbmkdjCpSSE=", + "dev": true + }, + "bluebird": { + "version": "3.5.1", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.1.tgz", + "integrity": "sha512-MKiLiV+I1AA596t9w1sQJ8jkiSr5+ZKi0WKrYGUn6d1Fx+Ij4tIj+m2WMQSGczs5jZVxV339chE8iwk6F64wjA==", + "dev": true + }, + "body-parser": { + "version": "1.18.3", + "resolved": 
"https://registry.npmjs.org/body-parser/-/body-parser-1.18.3.tgz", + "integrity": "sha1-WykhmP/dVTs6DyDe0FkrlWlVyLQ=", + "dev": true, + "requires": { + "bytes": "3.0.0", + "content-type": "1.0.4", + "debug": "2.6.9", + "depd": "1.1.2", + "http-errors": "1.6.3", + "iconv-lite": "0.4.23", + "on-finished": "2.3.0", + "qs": "6.5.2", + "raw-body": "2.3.3", + "type-is": "1.6.16" + } + }, + "boom": { + "version": "2.10.1", + "resolved": "https://registry.npmjs.org/boom/-/boom-2.10.1.tgz", + "integrity": "sha1-OciRjO/1eZ+D+UkqhI9iWt0Mdm8=", + "dev": true, + "requires": { + "hoek": "2.16.3" + } + }, + "bower": { + "version": "1.8.4", + "resolved": "https://registry.npmjs.org/bower/-/bower-1.8.4.tgz", + "integrity": "sha1-54dqB23rgTf30GUl3F6MZtuC8oo=", + "dev": true + }, + "bower-config": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/bower-config/-/bower-config-0.5.3.tgz", + "integrity": "sha1-mPxbQah4cO+cu5KXY1z4H1UF/bE=", + "dev": true, + "requires": { + "graceful-fs": "2.0.3", + "mout": "0.9.1", + "optimist": "0.6.1", + "osenv": "0.0.3" + }, + "dependencies": { + "graceful-fs": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-2.0.3.tgz", + "integrity": "sha1-fNLNsiiko/Nule+mzBQt59GhNtA=", + "dev": true + }, + "osenv": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.0.3.tgz", + "integrity": "sha1-zWrY3bKQkVrZ4idlV2Al1BHynLY=", + "dev": true + } + } + }, + "bower-endpoint-parser": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz", + "integrity": "sha1-ALVlrb+rby01rd3pd+l5Yqy8s/Y=", + "dev": true + }, + "bower-json": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/bower-json/-/bower-json-0.4.0.tgz", + "integrity": "sha1-qZw8z0Fu8FkO0N7SUsdg8cbZN2Y=", + "dev": true, + "requires": { + "deep-extend": "0.2.11", + "graceful-fs": "2.0.3", + "intersect": "0.0.3" + }, + "dependencies": { + "graceful-fs": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-2.0.3.tgz", + "integrity": "sha1-fNLNsiiko/Nule+mzBQt59GhNtA=", + "dev": true + } + } + }, + "bower-logger": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/bower-logger/-/bower-logger-0.2.2.tgz", + "integrity": "sha1-Ob4H6Xmy/I4DqUY0IF7ZQiNz04E=", + "dev": true + }, + "bower-registry-client": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/bower-registry-client/-/bower-registry-client-0.2.4.tgz", + "integrity": "sha1-Jp/H6Ji2J/uTnRFEpZMlTX+77rw=", + "dev": true, + "requires": { + "async": "0.2.10", + "bower-config": "0.5.3", + "graceful-fs": "2.0.3", + "lru-cache": "2.3.1", + "mkdirp": "0.3.5", + "request": "2.51.0", + "request-replay": "0.2.0", + "rimraf": "2.2.8" + }, + "dependencies": { + "asn1": { + "version": "0.1.11", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.1.11.tgz", + "integrity": "sha1-VZvhg3bQik7E2+gId9J4GGObLfc=", + "dev": true + }, + "assert-plus": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.1.5.tgz", + "integrity": "sha1-7nQAlBMALYTOxyGcasgRgS5yMWA=", + "dev": true + }, + "async": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=", + "dev": true + }, + "aws-sign2": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.5.0.tgz", + "integrity": "sha1-xXED96F/wDfwLXwuZLYC6iI/fWM=", + "dev": 
true + }, + "bl": { + "version": "0.9.5", + "resolved": "https://registry.npmjs.org/bl/-/bl-0.9.5.tgz", + "integrity": "sha1-wGt5evCF6gC8Unr8jvzxHeIjIFQ=", + "dev": true, + "requires": { + "readable-stream": "1.0.34" + } + }, + "boom": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/boom/-/boom-0.4.2.tgz", + "integrity": "sha1-emNune1O/O+xnO9JR6PGffrukRs=", + "dev": true, + "requires": { + "hoek": "0.9.1" + } + }, + "caseless": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.8.0.tgz", + "integrity": "sha1-W8oogdQUN/VLJAfr40iIx7mtT30=", + "dev": true + }, + "combined-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", + "integrity": "sha1-ATfmV7qlp1QcV6w3rF/AfXO03B8=", + "dev": true, + "requires": { + "delayed-stream": "0.0.5" + } + }, + "cryptiles": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-0.2.2.tgz", + "integrity": "sha1-7ZH/HxetE9N0gohZT4pIoNJvMlw=", + "dev": true, + "requires": { + "boom": "0.4.2" + } + }, + "delayed-stream": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz", + "integrity": "sha1-1LH0OpPoKW3+AmlPRoC8N6MTxz8=", + "dev": true + }, + "forever-agent": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.5.2.tgz", + "integrity": "sha1-bQ4JxJIflKJ/Y9O0nF/v8epMUTA=", + "dev": true + }, + "form-data": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-0.2.0.tgz", + "integrity": "sha1-Jvi8JtpkQOKZy9z7aQNcT3em5GY=", + "dev": true, + "requires": { + "async": "0.9.2", + "combined-stream": "0.0.7", + "mime-types": "2.0.14" + }, + "dependencies": { + "async": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz", + "integrity": "sha1-rqdNXmHB+JlhO/ZL2mbUx48v0X0=", + "dev": true + }, + "mime-types": { + "version": "2.0.14", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.0.14.tgz", + "integrity": "sha1-MQ4VnbI+B3+Lsit0jav6SVcUCqY=", + "dev": true, + "requires": { + "mime-db": "1.12.0" + } + } + } + }, + "graceful-fs": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-2.0.3.tgz", + "integrity": "sha1-fNLNsiiko/Nule+mzBQt59GhNtA=", + "dev": true + }, + "hawk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/hawk/-/hawk-1.1.1.tgz", + "integrity": "sha1-h81JH5tG5OKurKM1QWdmiF0tHtk=", + "dev": true, + "requires": { + "boom": "0.4.2", + "cryptiles": "0.2.2", + "hoek": "0.9.1", + "sntp": "0.2.4" + } + }, + "hoek": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/hoek/-/hoek-0.9.1.tgz", + "integrity": "sha1-PTIkYrrfB3Fup+uFuviAec3c5QU=", + "dev": true + }, + "http-signature": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-0.10.1.tgz", + "integrity": "sha1-T72sEyVZqoMjEh5UB3nAoBKyfmY=", + "dev": true, + "requires": { + "asn1": "0.1.11", + "assert-plus": "0.1.5", + "ctype": "0.5.3" + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "lru-cache": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-2.3.1.tgz", + "integrity": "sha1-s632s9hW6VTiw5DmzvIggSRaU9Y=", + "dev": true + }, + "mime-db": { + "version": "1.12.0", + "resolved": 
"https://registry.npmjs.org/mime-db/-/mime-db-1.12.0.tgz", + "integrity": "sha1-PQxjGA9FjrENMlqqN9fFiuMS6dc=", + "dev": true + }, + "mime-types": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-1.0.2.tgz", + "integrity": "sha1-mVrhOSq4r/y/yyZB3QVOlDwNXc4=", + "dev": true + }, + "mkdirp": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.5.tgz", + "integrity": "sha1-3j5fiWHIjHh+4TaN+EmsRBPsqNc=", + "dev": true + }, + "node-uuid": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/node-uuid/-/node-uuid-1.4.8.tgz", + "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=", + "dev": true + }, + "oauth-sign": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.5.0.tgz", + "integrity": "sha1-12f1FpMlYg6rLgh+8MRy53PbZGE=", + "dev": true + }, + "qs": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-2.3.3.tgz", + "integrity": "sha1-6eha2+ddoLvkyOBHaghikPhjtAQ=", + "dev": true + }, + "readable-stream": { + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "request": { + "version": "2.51.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.51.0.tgz", + "integrity": "sha1-NdALvswBLlX5B7G9ng29V3v+8m4=", + "dev": true, + "requires": { + "aws-sign2": "0.5.0", + "bl": "0.9.5", + "caseless": "0.8.0", + "combined-stream": "0.0.7", + "forever-agent": "0.5.2", + "form-data": "0.2.0", + "hawk": "1.1.1", + "http-signature": "0.10.1", + "json-stringify-safe": "5.0.1", + "mime-types": "1.0.2", + "node-uuid": "1.4.8", + "oauth-sign": "0.5.0", + "qs": "2.3.3", + "stringstream": "0.0.6", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.4.3" + } + }, + "rimraf": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.8.tgz", + "integrity": "sha1-5Dm+Kq7jJzIZUnMPmaiSnk/FBYI=", + "dev": true + }, + "sntp": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/sntp/-/sntp-0.2.4.tgz", + "integrity": "sha1-+4hfGLDzqtGJ+CSGJTa87ux1CQA=", + "dev": true, + "requires": { + "hoek": "0.9.1" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + }, + "tunnel-agent": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.3.tgz", + "integrity": "sha1-Y3PbdpCf5XDgjXNYM2Xtgop07us=", + "dev": true + } + } + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "requires": { + "balanced-match": "1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", + "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", + "dev": true, + "requires": { + "expand-range": "1.8.2", + "preserve": "0.2.0", + "repeat-element": "1.1.2" + } + }, + "buffer-from": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.0.tgz", + "integrity": 
"sha512-c5mRlguI/Pe2dSZmpER62rSCu0ryKmWddzRYsuXc50U2/g8jMOulc31VZMa4mYx31U5xsmSOpDCgH88Vl9cDGQ==", + "dev": true + }, + "buffer-more-ints": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/buffer-more-ints/-/buffer-more-ints-0.0.2.tgz", + "integrity": "sha1-JrOIXRD6E9t/wBquOquHAZngEkw=", + "dev": true + }, + "buffers": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/buffers/-/buffers-0.1.1.tgz", + "integrity": "sha1-skV5w77U1tOWru5tmorn9Ugqt7s=", + "dev": true + }, + "buildmail": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/buildmail/-/buildmail-4.0.1.tgz", + "integrity": "sha1-h393OLeHKYccmhBeO4N9K+EaenI=", + "dev": true, + "optional": true, + "requires": { + "addressparser": "1.0.1", + "libbase64": "0.1.0", + "libmime": "3.0.0", + "libqp": "1.1.0", + "nodemailer-fetch": "1.6.0", + "nodemailer-shared": "1.1.0", + "punycode": "1.4.1" + } + }, + "bytes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", + "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=", + "dev": true + }, + "callsite": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz", + "integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA=", + "dev": true + }, + "cardinal": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/cardinal/-/cardinal-0.4.0.tgz", + "integrity": "sha1-fRCq+yCDe94EPEXkOgyMKM2q5F4=", + "dev": true, + "requires": { + "redeyed": "0.4.4" + } + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", + "dev": true + }, + "chainsaw": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/chainsaw/-/chainsaw-0.1.0.tgz", + "integrity": "sha1-XqtQsor+WAdNDVgpE4iCi15fvJg=", + "dev": true, + "requires": { + "traverse": "0.3.9" + } + }, + "chalk": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "dev": true, + "optional": true, + "requires": { + "ansi-styles": "2.2.1", + "escape-string-regexp": "1.0.5", + "has-ansi": "2.0.0", + "strip-ansi": "3.0.1", + "supports-color": "2.0.0" + } + }, + "chmodr": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/chmodr/-/chmodr-0.1.0.tgz", + "integrity": "sha1-4JIVodUVQtsqJXaWl2W89hJVg+s=", + "dev": true + }, + "chokidar": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", + "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", + "dev": true, + "requires": { + "anymatch": "1.3.2", + "async-each": "1.0.1", + "fsevents": "1.2.4", + "glob-parent": "2.0.0", + "inherits": "2.0.3", + "is-binary-path": "1.0.1", + "is-glob": "2.0.1", + "path-is-absolute": "1.0.1", + "readdirp": "2.1.0" + } + }, + "circular-json": { + "version": "0.5.4", + "resolved": "https://registry.npmjs.org/circular-json/-/circular-json-0.5.4.tgz", + "integrity": "sha512-vnJA8KS0BfOihugYEUkLRcnmq21FbuivbxgzDLXNs3zIk4KllV4Mx4UuTzBXht9F00C7QfD1YqMXg1zP6EXpig==", + "dev": true + }, + "cli-color": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/cli-color/-/cli-color-0.3.3.tgz", + "integrity": "sha1-EtW90Vj/igsNtAEZiRPAPfBp9vU=", + "dev": true, + "requires": { + "d": "0.1.1", + "es5-ext": "0.10.44", + "memoizee": "0.3.10", + "timers-ext": "0.1.5" + } + }, + "co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": 
"sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=", + "dev": true + }, + "colors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.3.0.tgz", + "integrity": "sha512-EDpX3a7wHMWFA7PUHWPHNWqOxIIRSJetuwl0AS5Oi/5FMV8kWm69RTlgm00GKjBO1xFHMtBbL49yRtMMdticBw==", + "dev": true + }, + "combine-lists": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/combine-lists/-/combine-lists-1.0.1.tgz", + "integrity": "sha1-RYwH4J4NkA/Ci3Cj/sLazR0st/Y=", + "dev": true, + "requires": { + "lodash": "4.17.10" + } + }, + "combined-stream": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz", + "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=", + "dev": true, + "requires": { + "delayed-stream": "1.0.0" + } + }, + "commander": { + "version": "2.15.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz", + "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag==", + "dev": true, + "optional": true + }, + "component-bind": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz", + "integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E=", + "dev": true + }, + "component-emitter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=", + "dev": true + }, + "component-inherit": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/component-inherit/-/component-inherit-0.0.3.tgz", + "integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM=", + "dev": true + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", + "dev": true + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "dev": true, + "requires": { + "buffer-from": "1.1.0", + "inherits": "2.0.3", + "readable-stream": "2.3.6", + "typedarray": "0.0.6" + } + }, + "config-chain": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.11.tgz", + "integrity": "sha1-q6CXR9++TD5w52am5BWG4YWfxvI=", + "dev": true, + "requires": { + "ini": "1.3.5", + "proto-list": "1.2.4" + } + }, + "configstore": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-0.3.2.tgz", + "integrity": "sha1-JeTBbDdoq/dcWmW8YXYfSVBVtFk=", + "dev": true, + "requires": { + "graceful-fs": "3.0.11", + "js-yaml": "3.11.0", + "mkdirp": "0.5.1", + "object-assign": "2.1.1", + "osenv": "0.1.0", + "user-home": "1.1.1", + "uuid": "2.0.3", + "xdg-basedir": "1.0.1" + }, + "dependencies": { + "graceful-fs": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-3.0.11.tgz", + "integrity": "sha1-dhPHeKGv6mLyXGMKCG1/Osu92Bg=", + "dev": true, + "requires": { + "natives": "1.1.4" + } + }, + "object-assign": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-2.1.1.tgz", + "integrity": "sha1-Q8NuXVaf+OSBbE76i+AtJpZ8GKo=", + "dev": true + }, + "uuid": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-2.0.3.tgz", + "integrity": "sha1-Z+LoY3lyFVMN/zGOW/nc6/1Hsho=", + "dev": true + } + } + }, + "connect": 
{ + "version": "3.6.6", + "resolved": "https://registry.npmjs.org/connect/-/connect-3.6.6.tgz", + "integrity": "sha1-Ce/2xVr3I24TcTWnJXSFi2eG9SQ=", + "dev": true, + "requires": { + "debug": "2.6.9", + "finalhandler": "1.1.0", + "parseurl": "1.3.2", + "utils-merge": "1.0.1" + } + }, + "content-type": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", + "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==", + "dev": true + }, + "cookie": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", + "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=", + "dev": true + }, + "core-js": { + "version": "2.5.7", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz", + "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==", + "dev": true + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", + "dev": true + }, + "cryptiles": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-2.0.5.tgz", + "integrity": "sha1-O9/s3GCBR8HGcgL6KR59ylnqo7g=", + "dev": true, + "optional": true, + "requires": { + "boom": "2.10.1" + } + }, + "ctype": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/ctype/-/ctype-0.5.3.tgz", + "integrity": "sha1-gsGMJGH3QRTvFsE1IkrQuRRMoS8=", + "dev": true + }, + "custom-event": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/custom-event/-/custom-event-1.0.1.tgz", + "integrity": "sha1-XQKkaFCt8bSjF5RqOSj8y1v9BCU=", + "dev": true + }, + "d": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/d/-/d-0.1.1.tgz", + "integrity": "sha1-2hhMU10Y2O57oqoim5FACfrhEwk=", + "dev": true, + "requires": { + "es5-ext": "0.10.44" + } + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "data-uri-to-buffer": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-1.2.0.tgz", + "integrity": "sha512-vKQ9DTQPN1FLYiiEEOQ6IBGFqvjCa5rSK3cWMy/Nespm5d/x3dGFT9UBZnkLxCwua/IXBi2TYnwTEpsOvhC4UQ==", + "dev": true, + "optional": true + }, + "date-format": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/date-format/-/date-format-1.2.0.tgz", + "integrity": "sha1-YV6CjiM90aubua4JUODOzPpuytg=", + "dev": true + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "decompress-zip": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/decompress-zip/-/decompress-zip-0.0.8.tgz", + "integrity": "sha1-SiZbIseyCdeyT6ZvKy37ztWQRPM=", + "dev": true, + "requires": { + "binary": "0.3.0", + "graceful-fs": "3.0.11", + "mkpath": "0.1.0", + "nopt": "2.2.1", + "q": "1.0.1", + "readable-stream": "1.1.14", + "touch": "0.0.2" + }, + "dependencies": { + "graceful-fs": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-3.0.11.tgz", + "integrity": "sha1-dhPHeKGv6mLyXGMKCG1/Osu92Bg=", + "dev": true, + 
"requires": { + "natives": "1.1.4" + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "nopt": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-2.2.1.tgz", + "integrity": "sha1-KqCbfRdoSHs7ianFqlIzW/8Lrqc=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + } + } + }, + "deep-extend": { + "version": "0.2.11", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.2.11.tgz", + "integrity": "sha1-eha6aXKRMjQFBhcElLyD9wdv4I8=", + "dev": true + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=", + "dev": true, + "optional": true + }, + "degenerator": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-1.0.4.tgz", + "integrity": "sha1-/PSQo37OJmRk2cxDGrmMWBnO0JU=", + "dev": true, + "optional": true, + "requires": { + "ast-types": "0.11.5", + "escodegen": "1.9.1", + "esprima": "3.1.3" + } + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", + "dev": true + }, + "depd": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", + "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=", + "dev": true + }, + "di": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/di/-/di-0.0.1.tgz", + "integrity": "sha1-gGZJMmzqp8qjMG112YXqJ0i6kTw=", + "dev": true + }, + "dom-serialize": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/dom-serialize/-/dom-serialize-2.2.1.tgz", + "integrity": "sha1-ViromZ9Evl6jB29UGdzVnrQ6yVs=", + "dev": true, + "requires": { + "custom-event": "1.0.1", + "ent": "2.2.0", + "extend": "3.0.1", + "void-elements": "2.0.1" + } + }, + "double-ended-queue": { + "version": "2.1.0-0", + "resolved": "https://registry.npmjs.org/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz", + "integrity": "sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=", + "dev": true, + "optional": true + }, + "ecc-jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz", + "integrity": "sha1-D8c6ntXw1Tw4GTOYUj735UN3dQU=", + "dev": true, + "optional": true, + "requires": { + "jsbn": "0.1.1" + } + }, + "ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=", + "dev": true + }, + "encodeurl": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", + "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=", + "dev": true + }, + "end-of-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.0.0.tgz", + "integrity": "sha1-1FlucCc0qT5A6a+GQxnqvZn/Lw4=", + 
"dev": true, + "requires": { + "once": "1.3.3" + }, + "dependencies": { + "once": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/once/-/once-1.3.3.tgz", + "integrity": "sha1-suJhVXzkwxTsgwTz+oJmPkKXyiA=", + "dev": true, + "requires": { + "wrappy": "1.0.2" + } + } + } + }, + "engine.io": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.1.5.tgz", + "integrity": "sha512-D06ivJkYxyRrcEe0bTpNnBQNgP9d3xog+qZlLbui8EsMr/DouQpf5o9FzJnWYHEYE0YsFHllUv2R1dkgYZXHcA==", + "dev": true, + "requires": { + "accepts": "1.3.5", + "base64id": "1.0.0", + "cookie": "0.3.1", + "debug": "3.1.0", + "engine.io-parser": "2.1.2", + "uws": "9.14.0", + "ws": "3.3.3" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "engine.io-client": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.1.6.tgz", + "integrity": "sha512-hnuHsFluXnsKOndS4Hv6SvUrgdYx1pk2NqfaDMW+GWdgfU3+/V25Cj7I8a0x92idSpa5PIhJRKxPvp9mnoLsfg==", + "dev": true, + "requires": { + "component-emitter": "1.2.1", + "component-inherit": "0.0.3", + "debug": "3.1.0", + "engine.io-parser": "2.1.2", + "has-cors": "1.1.0", + "indexof": "0.0.1", + "parseqs": "0.0.5", + "parseuri": "0.0.5", + "ws": "3.3.3", + "xmlhttprequest-ssl": "1.5.5", + "yeast": "0.1.2" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "engine.io-parser": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.2.tgz", + "integrity": "sha512-dInLFzr80RijZ1rGpx1+56/uFoH7/7InhH3kZt+Ms6hT8tNx3NGW/WNSA/f8As1WkOfkuyb3tnRyuXGxusclMw==", + "dev": true, + "requires": { + "after": "0.8.2", + "arraybuffer.slice": "0.0.7", + "base64-arraybuffer": "0.1.5", + "blob": "0.0.4", + "has-binary2": "1.0.3" + } + }, + "ent": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ent/-/ent-2.2.0.tgz", + "integrity": "sha1-6WQhkyWiHQX0RGai9obtbOX13R0=", + "dev": true + }, + "es5-ext": { + "version": "0.10.44", + "resolved": "https://registry.npmjs.org/es5-ext/-/es5-ext-0.10.44.tgz", + "integrity": "sha512-TO4Vt9IhW3FzDKLDOpoA8VS9BCV4b9WTf6BqvMOgfoa8wX73F3Kh3y2J7yTstTaXlQ0k1vq4DH2vw6RSs42z+g==", + "dev": true, + "requires": { + "es6-iterator": "2.0.3", + "es6-symbol": "3.1.1", + "next-tick": "1.0.0" + } + }, + "es6-iterator": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-2.0.3.tgz", + "integrity": "sha1-p96IkUGgWpSwhUQDstCg+/qY87c=", + "dev": true, + "requires": { + "d": "1.0.0", + "es5-ext": "0.10.44", + "es6-symbol": "3.1.1" + }, + "dependencies": { + "d": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.0.tgz", + "integrity": "sha1-dUu1v+VUUdpppYuU1F9MWwRi1Y8=", + "dev": true, + "requires": { + "es5-ext": "0.10.44" + } + } + } + }, + "es6-promise": { + "version": "4.2.4", + "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.4.tgz", + "integrity": "sha512-/NdNZVJg+uZgtm9eS3O6lrOLYmQag2DjdEXuPaHlZ6RuVqgqaVZfgYCepEIKsLqwdQArOPtC3XzRLqGGfT8KQQ==", + "dev": 
true + }, + "es6-promisify": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/es6-promisify/-/es6-promisify-5.0.0.tgz", + "integrity": "sha1-UQnWLz5W6pZ8S2NQWu8IKRyKUgM=", + "dev": true, + "requires": { + "es6-promise": "4.2.4" + } + }, + "es6-symbol": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-3.1.1.tgz", + "integrity": "sha1-vwDvT9q2uhtG7Le2KbTH7VcVzHc=", + "dev": true, + "requires": { + "d": "1.0.0", + "es5-ext": "0.10.44" + }, + "dependencies": { + "d": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.0.tgz", + "integrity": "sha1-dUu1v+VUUdpppYuU1F9MWwRi1Y8=", + "dev": true, + "requires": { + "es5-ext": "0.10.44" + } + } + } + }, + "es6-weak-map": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/es6-weak-map/-/es6-weak-map-0.1.4.tgz", + "integrity": "sha1-cGzvnpmqI2undmwjnIueKG6n0ig=", + "dev": true, + "requires": { + "d": "0.1.1", + "es5-ext": "0.10.44", + "es6-iterator": "0.1.3", + "es6-symbol": "2.0.1" + }, + "dependencies": { + "es6-iterator": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/es6-iterator/-/es6-iterator-0.1.3.tgz", + "integrity": "sha1-1vWLjE/EE8JJtLqhl2j45NfIlE4=", + "dev": true, + "requires": { + "d": "0.1.1", + "es5-ext": "0.10.44", + "es6-symbol": "2.0.1" + } + }, + "es6-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/es6-symbol/-/es6-symbol-2.0.1.tgz", + "integrity": "sha1-dhtcZ8/U8dGK+yNPaR1nhoLLO/M=", + "dev": true, + "requires": { + "d": "0.1.1", + "es5-ext": "0.10.44" + } + } + } + }, + "escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=", + "dev": true + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", + "dev": true + }, + "escodegen": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.9.1.tgz", + "integrity": "sha512-6hTjO1NAWkHnDk3OqQ4YrCuwwmGHL9S3nPlzBOUG/R44rda3wLNrfvQ5fkSGjyhHFKM7ALPKcKGrwvCLe0lC7Q==", + "dev": true, + "optional": true, + "requires": { + "esprima": "3.1.3", + "estraverse": "4.2.0", + "esutils": "2.0.2", + "optionator": "0.8.2", + "source-map": "0.6.1" + } + }, + "esprima": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-3.1.3.tgz", + "integrity": "sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=", + "dev": true + }, + "estraverse": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz", + "integrity": "sha1-De4/7TH81GlhjOc0IJn8GvoL2xM=", + "dev": true, + "optional": true + }, + "esutils": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz", + "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=", + "dev": true, + "optional": true + }, + "event-emitter": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/event-emitter/-/event-emitter-0.3.5.tgz", + "integrity": "sha1-34xp7vFkeSPHFXuc6DhAYQsCzDk=", + "dev": true, + "requires": { + "d": "1.0.0", + "es5-ext": "0.10.44" + }, + "dependencies": { + "d": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/d/-/d-1.0.0.tgz", + "integrity": "sha1-dUu1v+VUUdpppYuU1F9MWwRi1Y8=", + "dev": true, + "requires": { + "es5-ext": "0.10.44" + } + } + } + }, + "eventemitter3": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/eventemitter3/-/eventemitter3-3.1.0.tgz", + "integrity": "sha512-ivIvhpq/Y0uSjcHDcOIccjmYjGLcP09MFGE7ysAwkAvkXfpZlC985pH2/ui64DKazbTW/4kN3yqozUxlXzI6cA==", + "dev": true + }, + "expand-braces": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/expand-braces/-/expand-braces-0.1.2.tgz", + "integrity": "sha1-SIsdHSRRyz06axks/AMPRMWFX+o=", + "dev": true, + "requires": { + "array-slice": "0.2.3", + "array-unique": "0.2.1", + "braces": "0.1.5" + }, + "dependencies": { + "braces": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/braces/-/braces-0.1.5.tgz", + "integrity": "sha1-wIVxEIUpHYt1/ddOqw+FlygHEeY=", + "dev": true, + "requires": { + "expand-range": "0.1.1" + } + }, + "expand-range": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-0.1.1.tgz", + "integrity": "sha1-TLjtoJk8pW+k9B/ELzy7TMrf8EQ=", + "dev": true, + "requires": { + "is-number": "0.1.1", + "repeat-string": "0.2.2" + } + }, + "is-number": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-0.1.1.tgz", + "integrity": "sha1-aaevEWlj1HIG7JvZtIoUIW8eOAY=", + "dev": true + }, + "repeat-string": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-0.2.2.tgz", + "integrity": "sha1-x6jTI2BoNiBZp+RlH8aITosftK4=", + "dev": true + } + } + }, + "expand-brackets": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", + "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", + "dev": true, + "requires": { + "is-posix-bracket": "0.1.1" + } + }, + "expand-range": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", + "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", + "dev": true, + "requires": { + "fill-range": "2.2.4" + } + }, + "extend": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.1.tgz", + "integrity": "sha1-p1Xqe8Gt/MWjHOfnYtuq3F5jZEQ=", + "dev": true + }, + "extglob": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", + "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", + "dev": true, + "requires": { + "is-extglob": "1.0.0" + } + }, + "extract-zip": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.6.7.tgz", + "integrity": "sha1-qEC0uK9kAyZMjbV/Txp0Mz74H+k=", + "dev": true, + "requires": { + "concat-stream": "1.6.2", + "debug": "2.6.9", + "mkdirp": "0.5.1", + "yauzl": "2.4.1" + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", + "dev": true + }, + "fast-deep-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", + "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=", + "dev": true + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=", + "dev": true + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=", + "dev": true, + "optional": true + }, + "fd-slicer": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.0.1.tgz", + "integrity": "sha1-i1vL2ewyfFBBv5qwI/1nUPEXfmU=", + "dev": true, + "requires": { + "pend": "1.2.0" + } + }, + "figures": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", + "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", + "dev": true, + "requires": { + "escape-string-regexp": "1.0.5", + "object-assign": "4.1.1" + } + }, + "file-uri-to-path": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz", + "integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==", + "dev": true, + "optional": true + }, + "filename-regex": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", + "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=", + "dev": true + }, + "fill-range": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", + "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "dev": true, + "requires": { + "is-number": "2.1.0", + "isobject": "2.1.0", + "randomatic": "3.0.0", + "repeat-element": "1.1.2", + "repeat-string": "1.6.1" + } + }, + "finalhandler": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.0.tgz", + "integrity": "sha1-zgtoVbRYU+eRsvzGgARtiCU91/U=", + "dev": true, + "requires": { + "debug": "2.6.9", + "encodeurl": "1.0.2", + "escape-html": "1.0.3", + "on-finished": "2.3.0", + "parseurl": "1.3.2", + "statuses": "1.3.1", + "unpipe": "1.0.0" + }, + "dependencies": { + "statuses": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.3.1.tgz", + "integrity": "sha1-+vUbnrdKrvOzrPStX2Gr8ky3uT4=", + "dev": true + } + } + }, + "follow-redirects": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.5.0.tgz", + "integrity": "sha512-fdrt472/9qQ6Kgjvb935ig6vJCuofpBUD14f9Vb+SLlm7xIe4Qva5gey8EKtv8lp7ahE1wilg3xL1znpVGtZIA==", + "dev": true, + "requires": { + "debug": "3.1.0" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=", + "dev": true + }, + "for-own": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", + "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", + "dev": true, + "requires": { + "for-in": "1.0.2" + } + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", + "dev": true + }, + "form-data": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz", + "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=", + "dev": true, + "requires": { + "asynckit": "0.4.0", + "combined-stream": "1.0.6", + "mime-types": "2.1.18" + } + }, + "fs-extra": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-1.0.0.tgz", + "integrity": 
"sha1-zTzl9+fLYUWIP8rjGR6Yd/hYeVA=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "jsonfile": "2.4.0", + "klaw": "1.3.1" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", + "dev": true + }, + "fsevents": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", + "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "dev": true, + "optional": true, + "requires": { + "nan": "2.10.0", + "node-pre-gyp": "0.10.0" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true, + "optional": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true, + "optional": true + }, + "are-we-there-yet": { + "version": "1.1.4", + "bundled": true, + "optional": true, + "requires": { + "delegates": "1.0.0", + "readable-stream": "2.3.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "requires": { + "balanced-match": "1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.0.1", + "bundled": true, + "optional": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "debug": { + "version": "2.6.9", + "bundled": true, + "optional": true, + "requires": { + "ms": "2.0.0" + } + }, + "deep-extend": { + "version": "0.5.1", + "bundled": true, + "optional": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true, + "optional": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true, + "optional": true + }, + "fs-minipass": { + "version": "1.2.5", + "bundled": true, + "optional": true, + "requires": { + "minipass": "2.2.4" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "optional": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "optional": true, + "requires": { + "aproba": "1.2.0", + "console-control-strings": "1.1.0", + "has-unicode": "2.0.1", + "object-assign": "4.1.1", + "signal-exit": "3.0.2", + "string-width": "1.0.2", + "strip-ansi": "3.0.1", + "wide-align": "1.1.2" + } + }, + "glob": { + "version": "7.1.2", + "bundled": true, + "optional": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "optional": true + }, + "iconv-lite": { + "version": "0.4.21", + "bundled": true, + "optional": true, + "requires": { + "safer-buffer": "2.1.2" + } + }, + "ignore-walk": { + "version": "3.0.1", + "bundled": true, + "optional": true, + "requires": { + "minimatch": "3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "optional": true, + "requires": { + "once": "1.4.0", + "wrappy": "1.0.2" + } + }, + "inherits": { + "version": "2.0.3", + "bundled": true + }, + "ini": { + "version": "1.3.5", + "bundled": true, + "optional": true + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "bundled": true, + "requires": { + "number-is-nan": "1.0.1" + } + }, + "isarray": { + "version": 
"1.0.0", + "bundled": true, + "optional": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "minimist": { + "version": "0.0.8", + "bundled": true + }, + "minipass": { + "version": "2.2.4", + "bundled": true, + "requires": { + "safe-buffer": "5.1.1", + "yallist": "3.0.2" + } + }, + "minizlib": { + "version": "1.1.0", + "bundled": true, + "optional": true, + "requires": { + "minipass": "2.2.4" + } + }, + "mkdirp": { + "version": "0.5.1", + "bundled": true, + "requires": { + "minimist": "0.0.8" + } + }, + "ms": { + "version": "2.0.0", + "bundled": true, + "optional": true + }, + "needle": { + "version": "2.2.0", + "bundled": true, + "optional": true, + "requires": { + "debug": "2.6.9", + "iconv-lite": "0.4.21", + "sax": "1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.10.0", + "bundled": true, + "optional": true, + "requires": { + "detect-libc": "1.0.3", + "mkdirp": "0.5.1", + "needle": "2.2.0", + "nopt": "4.0.1", + "npm-packlist": "1.1.10", + "npmlog": "4.1.2", + "rc": "1.2.7", + "rimraf": "2.6.2", + "semver": "5.5.0", + "tar": "4.4.1" + } + }, + "nopt": { + "version": "4.0.1", + "bundled": true, + "optional": true, + "requires": { + "abbrev": "1.1.1", + "osenv": "0.1.5" + } + }, + "npm-bundled": { + "version": "1.0.3", + "bundled": true, + "optional": true + }, + "npm-packlist": { + "version": "1.1.10", + "bundled": true, + "optional": true, + "requires": { + "ignore-walk": "3.0.1", + "npm-bundled": "1.0.3" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "optional": true, + "requires": { + "are-we-there-yet": "1.1.4", + "console-control-strings": "1.1.0", + "gauge": "2.7.4", + "set-blocking": "2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "requires": { + "wrappy": "1.0.2" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "optional": true, + "requires": { + "os-homedir": "1.0.2", + "os-tmpdir": "1.0.2" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "optional": true + }, + "process-nextick-args": { + "version": "2.0.0", + "bundled": true, + "optional": true + }, + "rc": { + "version": "1.2.7", + "bundled": true, + "optional": true, + "requires": { + "deep-extend": "0.5.1", + "ini": "1.3.5", + "minimist": "1.2.0", + "strip-json-comments": "2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "bundled": true, + "optional": true + } + } + }, + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "optional": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "2.0.0", + "safe-buffer": "5.1.1", + "string_decoder": "1.1.1", + "util-deprecate": "1.0.2" + } + }, + "rimraf": { + "version": "2.6.2", + "bundled": true, + "optional": true, + "requires": { + "glob": "7.1.2" + } + }, + "safe-buffer": { + "version": "5.1.1", + "bundled": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true, + "optional": true + }, + "sax": { + "version": "1.2.4", + "bundled": true, + "optional": true + }, + "semver": { + "version": "5.5.0", + "bundled": true, + "optional": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": 
true, + "optional": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "optional": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "requires": { + "code-point-at": "1.1.0", + "is-fullwidth-code-point": "1.0.0", + "strip-ansi": "3.0.1" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "optional": true, + "requires": { + "safe-buffer": "5.1.1" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "optional": true + }, + "tar": { + "version": "4.4.1", + "bundled": true, + "optional": true, + "requires": { + "chownr": "1.0.1", + "fs-minipass": "1.2.5", + "minipass": "2.2.4", + "minizlib": "1.1.0", + "mkdirp": "0.5.1", + "safe-buffer": "5.1.1", + "yallist": "3.0.2" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "wide-align": { + "version": "1.1.2", + "bundled": true, + "optional": true, + "requires": { + "string-width": "1.0.2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true + }, + "yallist": { + "version": "3.0.2", + "bundled": true + } + } + }, + "fstream": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/fstream/-/fstream-1.0.11.tgz", + "integrity": "sha1-XB+x8RdHcRTwYyoOtLcbPLD9MXE=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "inherits": "2.0.3", + "mkdirp": "0.5.1", + "rimraf": "2.6.2" + } + }, + "fstream-ignore": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/fstream-ignore/-/fstream-ignore-1.0.5.tgz", + "integrity": "sha1-nDHa40dnAY/h0kmyTa2mfQktoQU=", + "dev": true, + "requires": { + "fstream": "1.0.11", + "inherits": "2.0.3", + "minimatch": "3.0.4" + } + }, + "ftp": { + "version": "0.3.10", + "resolved": "https://registry.npmjs.org/ftp/-/ftp-0.3.10.tgz", + "integrity": "sha1-kZfYYa2BQvPmPVqDv+TFn3MwiF0=", + "dev": true, + "optional": true, + "requires": { + "readable-stream": "1.1.14", + "xregexp": "2.0.0" + }, + "dependencies": { + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true, + "optional": true + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "optional": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true, + "optional": true + } + } + }, + "generate-function": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/generate-function/-/generate-function-2.0.0.tgz", + "integrity": "sha1-aFj+fAlpt9TpCTM3ZHrHn2DfvnQ=", + "dev": true, + "optional": true + }, + "generate-object-property": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/generate-object-property/-/generate-object-property-1.2.0.tgz", + "integrity": "sha1-nA4cQDCM6AT0eDYYuTf6iPmdUNA=", + "dev": true, + "optional": true, + "requires": { + "is-property": "1.0.2" + } + }, + "get-uri": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/get-uri/-/get-uri-2.0.2.tgz", + "integrity": 
"sha512-ZD325dMZOgerGqF/rF6vZXyFGTAay62svjQIT+X/oU2PtxYpFxvSkbsdi+oxIrsNxlZVd4y8wUDqkaExWTI/Cw==", + "dev": true, + "optional": true, + "requires": { + "data-uri-to-buffer": "1.2.0", + "debug": "2.6.9", + "extend": "3.0.1", + "file-uri-to-path": "1.0.0", + "ftp": "0.3.10", + "readable-stream": "2.3.6" + } + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "dev": true, + "requires": { + "assert-plus": "1.0.0" + } + }, + "glob": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz", + "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==", + "dev": true, + "requires": { + "fs.realpath": "1.0.0", + "inflight": "1.0.6", + "inherits": "2.0.3", + "minimatch": "3.0.4", + "once": "1.4.0", + "path-is-absolute": "1.0.1" + } + }, + "glob-base": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", + "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", + "dev": true, + "requires": { + "glob-parent": "2.0.0", + "is-glob": "2.0.1" + } + }, + "glob-parent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", + "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", + "dev": true, + "requires": { + "is-glob": "2.0.1" + } + }, + "got": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/got/-/got-0.3.0.tgz", + "integrity": "sha1-iI7GbKS8c1qwidvpWUltD3lIVJM=", + "dev": true, + "requires": { + "object-assign": "0.3.1" + }, + "dependencies": { + "object-assign": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-0.3.1.tgz", + "integrity": "sha1-Bg4qKifXwNd+x3t48Rqkf9iACNI=", + "dev": true + } + } + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=", + "dev": true + }, + "handlebars": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-2.0.0.tgz", + "integrity": "sha1-bp1/hRSjRn+l6fgswVjs/B1ax28=", + "dev": true, + "requires": { + "optimist": "0.3.7", + "uglify-js": "2.3.6" + }, + "dependencies": { + "optimist": { + "version": "0.3.7", + "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.3.7.tgz", + "integrity": "sha1-yQlBrVnkJzMokjB00s8ufLxuwNk=", + "dev": true, + "requires": { + "wordwrap": "0.0.3" + } + }, + "wordwrap": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", + "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=", + "dev": true + } + } + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", + "dev": true + }, + "har-validator": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.0.3.tgz", + "integrity": "sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0=", + "dev": true, + "requires": { + "ajv": "5.5.2", + "har-schema": "2.0.0" + } + }, + "has-ansi": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "dev": true, + "optional": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "has-binary2": { + "version": "1.0.3", + "resolved": 
"https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.3.tgz", + "integrity": "sha512-G1LWKhDSvhGeAQ8mPVQlqNcOB2sJdwATtZKl2pDKKHfpf/rYj24lkinxf69blJbnsvtqqNU+L3SL50vzZhXOnw==", + "dev": true, + "requires": { + "isarray": "2.0.1" + }, + "dependencies": { + "isarray": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=", + "dev": true + } + } + }, + "has-cors": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-cors/-/has-cors-1.1.0.tgz", + "integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk=", + "dev": true + }, + "hasha": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/hasha/-/hasha-2.2.0.tgz", + "integrity": "sha1-eNfL/B5tZjA/55g3NlmEUXsvbuE=", + "dev": true, + "requires": { + "is-stream": "1.1.0", + "pinkie-promise": "2.0.1" + } + }, + "hawk": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hawk/-/hawk-3.1.3.tgz", + "integrity": "sha1-B4REvXwWQLD+VA0sm3PVlnjo4cQ=", + "dev": true, + "optional": true, + "requires": { + "boom": "2.10.1", + "cryptiles": "2.0.5", + "hoek": "2.16.3", + "sntp": "1.0.9" + } + }, + "hipchat-notifier": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/hipchat-notifier/-/hipchat-notifier-1.1.0.tgz", + "integrity": "sha1-ttJJdVQ3wZEII2d5nTupoPI7Ix4=", + "dev": true, + "optional": true, + "requires": { + "lodash": "4.17.10", + "request": "2.87.0" + } + }, + "hoek": { + "version": "2.16.3", + "resolved": "https://registry.npmjs.org/hoek/-/hoek-2.16.3.tgz", + "integrity": "sha1-ILt0A9POo5jpHcRxCo/xuCdKJe0=", + "dev": true + }, + "http-errors": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", + "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", + "dev": true, + "requires": { + "depd": "1.1.2", + "inherits": "2.0.3", + "setprototypeof": "1.1.0", + "statuses": "1.5.0" + } + }, + "http-proxy": { + "version": "1.17.0", + "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.17.0.tgz", + "integrity": "sha512-Taqn+3nNvYRfJ3bGvKfBSRwy1v6eePlm3oc/aWVxZp57DQr5Eq3xhKJi7Z4hZpS8PC3H4qI+Yly5EmFacGuA/g==", + "dev": true, + "requires": { + "eventemitter3": "3.1.0", + "follow-redirects": "1.5.0", + "requires-port": "1.0.0" + } + }, + "http-proxy-agent": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-2.1.0.tgz", + "integrity": "sha512-qwHbBLV7WviBl0rQsOzH6o5lwyOIvwp/BdFnvVxXORldu5TmjFfjzBcWUWS5kWAZhmv+JtiDhSuQCp4sBfbIgg==", + "dev": true, + "requires": { + "agent-base": "4.2.0", + "debug": "3.1.0" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "jsprim": "1.4.1", + "sshpk": "1.14.1" + } + }, + "httpntlm": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/httpntlm/-/httpntlm-1.6.1.tgz", + "integrity": "sha1-rQFScUOi6Hc8+uapb1hla7UqNLI=", + "dev": true, + "requires": { + "httpreq": "0.4.24", + "underscore": "1.7.0" + } + }, + "httpreq": { + "version": "0.4.24", + "resolved": 
"https://registry.npmjs.org/httpreq/-/httpreq-0.4.24.tgz", + "integrity": "sha1-QzX/2CzZaWaKOUZckprGHWOTYn8=", + "dev": true + }, + "https-proxy-agent": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-2.2.1.tgz", + "integrity": "sha512-HPCTS1LW51bcyMYbxUIOO4HEOlQ1/1qRaFWcyxvwaqUS9TY88aoEuHUY33kuAh1YhVVaDQhLZsnPd+XNARWZlQ==", + "dev": true, + "requires": { + "agent-base": "4.2.0", + "debug": "3.1.0" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "iconv-lite": { + "version": "0.4.23", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz", + "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==", + "dev": true, + "requires": { + "safer-buffer": "2.1.2" + } + }, + "indexof": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz", + "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10=", + "dev": true + }, + "inflection": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/inflection/-/inflection-1.12.0.tgz", + "integrity": "sha1-ogCTVlbW9fa8TcdQLhrstwMihBY=", + "dev": true, + "optional": true + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "dev": true, + "requires": { + "once": "1.4.0", + "wrappy": "1.0.2" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=", + "dev": true + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==", + "dev": true + }, + "inquirer": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-0.7.1.tgz", + "integrity": "sha1-uKzxQBZb1YGGLtEZj7bSZDAJH6w=", + "dev": true, + "requires": { + "chalk": "0.5.1", + "cli-color": "0.3.3", + "figures": "1.7.0", + "lodash": "2.4.2", + "mute-stream": "0.0.4", + "readline2": "0.1.1", + "rx": "2.5.3", + "through": "2.3.8" + }, + "dependencies": { + "ansi-regex": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz", + "integrity": "sha1-DY6UaWej2BQ/k+JOKYUl/BsiNfk=", + "dev": true + }, + "ansi-styles": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-1.1.0.tgz", + "integrity": "sha1-6uy/Zs1waIJ2Cy9GkVgrj1XXp94=", + "dev": true + }, + "chalk": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-0.5.1.tgz", + "integrity": "sha1-Zjs6ZItotV0EaQ1JFnqoN4WPIXQ=", + "dev": true, + "requires": { + "ansi-styles": "1.1.0", + "escape-string-regexp": "1.0.5", + "has-ansi": "0.1.0", + "strip-ansi": "0.3.0", + "supports-color": "0.2.0" + } + }, + "has-ansi": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-0.1.0.tgz", + "integrity": "sha1-hPJlqujA5qiKEtcCKJS3VoiUxi4=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "lodash": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-2.4.2.tgz", + 
"integrity": "sha1-+t2DS5aDBz2hebPq5tnA0VBT9z4=", + "dev": true + }, + "strip-ansi": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.3.0.tgz", + "integrity": "sha1-JfSOoiynkYfzF0pNuHWTR7sSYiA=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "supports-color": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-0.2.0.tgz", + "integrity": "sha1-2S3iaU6z9nMjlz1649i1W0wiGQo=", + "dev": true + } + } + }, + "insight": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/insight/-/insight-0.4.3.tgz", + "integrity": "sha1-dtZTxcDYBIsDzbpjhaaUj3RhSvA=", + "dev": true, + "requires": { + "async": "0.9.2", + "chalk": "0.5.1", + "configstore": "0.3.2", + "inquirer": "0.6.0", + "lodash.debounce": "2.4.1", + "object-assign": "1.0.0", + "os-name": "1.0.3", + "request": "2.87.0", + "tough-cookie": "0.12.1" + }, + "dependencies": { + "ansi-regex": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz", + "integrity": "sha1-DY6UaWej2BQ/k+JOKYUl/BsiNfk=", + "dev": true + }, + "ansi-styles": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-1.1.0.tgz", + "integrity": "sha1-6uy/Zs1waIJ2Cy9GkVgrj1XXp94=", + "dev": true + }, + "async": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz", + "integrity": "sha1-rqdNXmHB+JlhO/ZL2mbUx48v0X0=", + "dev": true + }, + "chalk": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-0.5.1.tgz", + "integrity": "sha1-Zjs6ZItotV0EaQ1JFnqoN4WPIXQ=", + "dev": true, + "requires": { + "ansi-styles": "1.1.0", + "escape-string-regexp": "1.0.5", + "has-ansi": "0.1.0", + "strip-ansi": "0.3.0", + "supports-color": "0.2.0" + } + }, + "has-ansi": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-0.1.0.tgz", + "integrity": "sha1-hPJlqujA5qiKEtcCKJS3VoiUxi4=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "inquirer": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-0.6.0.tgz", + "integrity": "sha1-YU17s+SPnmqAKOlKDDjyPvKYI9M=", + "dev": true, + "requires": { + "chalk": "0.5.1", + "cli-color": "0.3.3", + "lodash": "2.4.2", + "mute-stream": "0.0.4", + "readline2": "0.1.1", + "rx": "2.5.3", + "through": "2.3.8" + } + }, + "lodash": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-2.4.2.tgz", + "integrity": "sha1-+t2DS5aDBz2hebPq5tnA0VBT9z4=", + "dev": true + }, + "object-assign": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-1.0.0.tgz", + "integrity": "sha1-5l3Idm07R7S4MHRlyDEdoDCwcKY=", + "dev": true + }, + "strip-ansi": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.3.0.tgz", + "integrity": "sha1-JfSOoiynkYfzF0pNuHWTR7sSYiA=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "supports-color": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-0.2.0.tgz", + "integrity": "sha1-2S3iaU6z9nMjlz1649i1W0wiGQo=", + "dev": true + }, + "tough-cookie": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-0.12.1.tgz", + "integrity": "sha1-giDH4hq9WxPZaAQlS9WoHr8sfWI=", + "dev": true, + "requires": { + "punycode": "1.4.1" + } + } + } + }, + "intersect": { + "version": "0.0.3", + "resolved": 
"https://registry.npmjs.org/intersect/-/intersect-0.0.3.tgz", + "integrity": "sha1-waSl5erG7eSvdQTMB+Ctp7yfSSA=", + "dev": true + }, + "ip": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", + "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=", + "dev": true + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "dev": true, + "requires": { + "binary-extensions": "1.11.0" + } + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==", + "dev": true + }, + "is-dotfile": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", + "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=", + "dev": true + }, + "is-equal-shallow": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", + "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", + "dev": true, + "requires": { + "is-primitive": "2.0.0" + } + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=", + "dev": true + }, + "is-extglob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", + "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=", + "dev": true + }, + "is-glob": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", + "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", + "dev": true, + "requires": { + "is-extglob": "1.0.0" + } + }, + "is-my-ip-valid": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-my-ip-valid/-/is-my-ip-valid-1.0.0.tgz", + "integrity": "sha512-gmh/eWXROncUzRnIa1Ubrt5b8ep/MGSnfAUI3aRp+sqTCs1tv1Isl8d8F6JmkN3dXKc3ehZMrtiPN9eL03NuaQ==", + "dev": true, + "optional": true + }, + "is-my-json-valid": { + "version": "2.17.2", + "resolved": "https://registry.npmjs.org/is-my-json-valid/-/is-my-json-valid-2.17.2.tgz", + "integrity": "sha512-IBhBslgngMQN8DDSppmgDv7RNrlFotuuDsKcrCP3+HbFaVivIBU7u9oiiErw8sH4ynx3+gOGQ3q2otkgiSi6kg==", + "dev": true, + "optional": true, + "requires": { + "generate-function": "2.0.0", + "generate-object-property": "1.2.0", + "is-my-ip-valid": "1.0.0", + "jsonpointer": "4.0.1", + "xtend": "4.0.1" + } + }, + "is-number": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", + "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", + "dev": true, + "requires": { + "kind-of": "3.2.2" + } + }, + "is-posix-bracket": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", + "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=", + "dev": true + }, + "is-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", + "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=", + "dev": true + }, + "is-property": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-property/-/is-property-1.0.2.tgz", + "integrity": "sha1-V/4cTkhHTt1lsJkR8msc1Ald2oQ=", + "dev": true, + "optional": true + }, + "is-root": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-root/-/is-root-1.0.0.tgz", + 
"integrity": "sha1-B7bCM7w5TNnQK6FclmvWZg1jQtU=", + "dev": true + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", + "dev": true + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", + "dev": true + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", + "dev": true + }, + "isbinaryfile": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/isbinaryfile/-/isbinaryfile-3.0.2.tgz", + "integrity": "sha1-Sj6XTsDLqQBNP8bN5yCeppNopiE=", + "dev": true + }, + "isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", + "dev": true + }, + "isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "dev": true, + "requires": { + "isarray": "1.0.0" + } + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", + "dev": true + }, + "jasmine": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jasmine/-/jasmine-3.1.0.tgz", + "integrity": "sha1-K9Wf1+xuwOistk4J9Fpo7SrRlSo=", + "dev": true, + "requires": { + "glob": "7.1.2", + "jasmine-core": "3.1.0" + } + }, + "jasmine-ajax": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/jasmine-ajax/-/jasmine-ajax-3.4.0.tgz", + "integrity": "sha512-LIVNVCmx5ou+IG6wgX7j73YYzvE2e3aqFWMjOhvAHWTnLICOYSobIH+PG/gOwtP20X0u2SkD3NXT/j5X8rMGOA==", + "dev": true + }, + "jasmine-core": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.1.0.tgz", + "integrity": "sha1-pHheE11d9lAk38kiSVPfWFvSdmw=", + "dev": true + }, + "jasmine-jquery": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/jasmine-jquery/-/jasmine-jquery-2.1.1.tgz", + "integrity": "sha1-1AleZGlEomdjI1dpqwGNnzDw1Hs=", + "dev": true + }, + "jasmine-spec-reporter": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/jasmine-spec-reporter/-/jasmine-spec-reporter-4.2.1.tgz", + "integrity": "sha512-FZBoZu7VE5nR7Nilzy+Np8KuVIOxF4oXDPDknehCYBDE080EnlPu0afdZNmpGDBRCUBv3mj5qgqCRmk6W/K8vg==", + "dev": true, + "requires": { + "colors": "1.1.2" + }, + "dependencies": { + "colors": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/colors/-/colors-1.1.2.tgz", + "integrity": "sha1-FopHAXVran9RoSzgyXv6KMCE7WM=", + "dev": true + } + } + }, + "jquery": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.3.1.tgz", + "integrity": "sha512-Ubldcmxp5np52/ENotGxlLe6aGMvmF4R8S6tZjsP6Knsaxd/xp3Zrh50cG93lR6nPXyUFwzN3ZSOQI0wRJNdGg==", + "dev": true + }, + "js-yaml": { + "version": "3.11.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.11.0.tgz", + "integrity": "sha512-saJstZWv7oNeOyBh3+Dx1qWzhW0+e6/8eDzo7p5rDFqxntSztloLtuKu+Ejhtq82jsilwOIZYsCz+lIjthg1Hw==", + "dev": true, + "requires": { + "argparse": "1.0.10", + "esprima": "4.0.0" + }, + "dependencies": { + "esprima": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.0.tgz", + "integrity": 
"sha512-oftTcaMu/EGrEIu904mWteKIv8vMuOgGYo7EhVJJN00R/EED9DCua/xxHRdYnKtcECzVg7xOWhflvJMnqcFZjw==", + "dev": true + } + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "dev": true, + "optional": true + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", + "dev": true + }, + "json-schema-traverse": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", + "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=", + "dev": true + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", + "dev": true + }, + "jsonfile": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-2.4.0.tgz", + "integrity": "sha1-NzaitCi4e72gzIO1P6PWM6NcKug=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + }, + "jsonify": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", + "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=", + "dev": true + }, + "jsonpointer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-4.0.1.tgz", + "integrity": "sha1-T9kss04OnbPInIYi7PUfm5eMbLk=", + "dev": true, + "optional": true + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "junk": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/junk/-/junk-1.0.3.tgz", + "integrity": "sha1-h75jSIZJy9ym9Tqzm+yczSNH9ZI=", + "dev": true + }, + "karma": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/karma/-/karma-2.0.2.tgz", + "integrity": "sha1-TS25QChQpmVR+nhLAWT7CCTtjEs=", + "dev": true, + "requires": { + "bluebird": "3.5.1", + "body-parser": "1.18.3", + "chokidar": "1.7.0", + "colors": "1.3.0", + "combine-lists": "1.0.1", + "connect": "3.6.6", + "core-js": "2.5.7", + "di": "0.0.1", + "dom-serialize": "2.2.1", + "expand-braces": "0.1.2", + "glob": "7.1.2", + "graceful-fs": "4.1.11", + "http-proxy": "1.17.0", + "isbinaryfile": "3.0.2", + "lodash": "4.17.10", + "log4js": "2.7.0", + "mime": "1.6.0", + "minimatch": "3.0.4", + "optimist": "0.6.1", + "qjobs": "1.2.0", + "range-parser": "1.2.0", + "rimraf": "2.6.2", + "safe-buffer": "5.1.2", + "socket.io": "2.0.4", + "source-map": "0.6.1", + "tmp": "0.0.33", + "useragent": "2.2.1" + } + }, + "karma-jasmine": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/karma-jasmine/-/karma-jasmine-1.1.2.tgz", + "integrity": "sha1-OU8rJf+0pkS5rabyLUQ+L9CIhsM=", + "dev": true + }, + "karma-jasmine-ajax": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/karma-jasmine-ajax/-/karma-jasmine-ajax-0.1.13.tgz", + "integrity": "sha1-eLuS2Jb+MqJaGACYxHci4dlgW/w=", + "dev": true, + "requires": { + "jasmine-ajax": "3.4.0" + } + }, + "karma-jasmine-jquery": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/karma-jasmine-jquery/-/karma-jasmine-jquery-0.1.1.tgz", + "integrity": "sha1-icG3VP6kElsfiTghOSwRuc5ddFY=", + "dev": true, + "requires": { + 
"bower": "1.8.4", + "bower-installer": "git://github.com/bessdsv/bower-installer.git#7f9cece1e6fada50f44dc0851e1d85815cd1b4a7" + }, + "dependencies": { + "ansi-regex": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz", + "integrity": "sha1-DY6UaWej2BQ/k+JOKYUl/BsiNfk=", + "dev": true + }, + "ansi-styles": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-1.1.0.tgz", + "integrity": "sha1-6uy/Zs1waIJ2Cy9GkVgrj1XXp94=", + "dev": true + }, + "asn1": { + "version": "0.1.11", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.1.11.tgz", + "integrity": "sha1-VZvhg3bQik7E2+gId9J4GGObLfc=", + "dev": true, + "optional": true + }, + "assert-plus": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.1.5.tgz", + "integrity": "sha1-7nQAlBMALYTOxyGcasgRgS5yMWA=", + "dev": true, + "optional": true + }, + "async": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=", + "dev": true + }, + "aws-sign2": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.5.0.tgz", + "integrity": "sha1-xXED96F/wDfwLXwuZLYC6iI/fWM=", + "dev": true, + "optional": true + }, + "bl": { + "version": "0.9.5", + "resolved": "https://registry.npmjs.org/bl/-/bl-0.9.5.tgz", + "integrity": "sha1-wGt5evCF6gC8Unr8jvzxHeIjIFQ=", + "dev": true, + "requires": { + "readable-stream": "1.0.34" + } + }, + "boom": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/boom/-/boom-0.4.2.tgz", + "integrity": "sha1-emNune1O/O+xnO9JR6PGffrukRs=", + "dev": true, + "requires": { + "hoek": "0.9.1" + } + }, + "bower-installer": { + "version": "git://github.com/bessdsv/bower-installer.git#7f9cece1e6fada50f44dc0851e1d85815cd1b4a7", + "dev": true, + "requires": { + "async": "0.2.10", + "bower": "1.3.12", + "colors": "0.6.2", + "glob": "3.2.11", + "lodash": "0.9.2", + "mkdirp": "0.3.5", + "node-fs": "0.1.7", + "nopt": "2.1.2" + }, + "dependencies": { + "bower": { + "version": "1.3.12", + "resolved": "https://registry.npmjs.org/bower/-/bower-1.3.12.tgz", + "integrity": "sha1-N94O2zkEuvkK7hM4Sho3mgXuIUw=", + "dev": true, + "requires": { + "abbrev": "1.0.9", + "archy": "0.0.2", + "bower-config": "0.5.3", + "bower-endpoint-parser": "0.2.2", + "bower-json": "0.4.0", + "bower-logger": "0.2.2", + "bower-registry-client": "0.2.4", + "cardinal": "0.4.0", + "chalk": "0.5.0", + "chmodr": "0.1.0", + "decompress-zip": "0.0.8", + "fstream": "1.0.11", + "fstream-ignore": "1.0.5", + "glob": "4.0.6", + "graceful-fs": "3.0.11", + "handlebars": "2.0.0", + "inquirer": "0.7.1", + "insight": "0.4.3", + "is-root": "1.0.0", + "junk": "1.0.3", + "lockfile": "1.0.4", + "lru-cache": "2.5.2", + "mkdirp": "0.5.0", + "mout": "0.9.1", + "nopt": "3.0.6", + "opn": "1.0.2", + "osenv": "0.1.0", + "p-throttler": "0.1.0", + "promptly": "0.2.0", + "q": "1.0.1", + "request": "2.42.0", + "request-progress": "0.3.0", + "retry": "0.6.0", + "rimraf": "2.2.8", + "semver": "2.3.2", + "shell-quote": "1.4.3", + "stringify-object": "1.0.1", + "tar-fs": "0.5.2", + "tmp": "0.0.23", + "update-notifier": "0.2.0", + "which": "1.0.9" + }, + "dependencies": { + "glob": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-4.0.6.tgz", + "integrity": "sha1-aVxQvdTi+1xdNwsJHziNNwfikac=", + "dev": true, + "requires": { + "graceful-fs": "3.0.11", + "inherits": "2.0.3", + "minimatch": "1.0.0", + "once": "1.4.0" + } 
+ }, + "mkdirp": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.0.tgz", + "integrity": "sha1-HXMHam35hs2TROFecfzAWkyavxI=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + }, + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + } + } + } + } + }, + "caseless": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.6.0.tgz", + "integrity": "sha1-gWfBq4OX+1u5X5bSjlqBxQ8kesQ=", + "dev": true + }, + "chalk": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-0.5.0.tgz", + "integrity": "sha1-N138y8IcCmCothvFt489wqVcIS8=", + "dev": true, + "requires": { + "ansi-styles": "1.1.0", + "escape-string-regexp": "1.0.5", + "has-ansi": "0.1.0", + "strip-ansi": "0.3.0", + "supports-color": "0.2.0" + } + }, + "colors": { + "version": "0.6.2", + "resolved": "https://registry.npmjs.org/colors/-/colors-0.6.2.tgz", + "integrity": "sha1-JCP+ZnisDF2uiFLl0OW+CMmXq8w=", + "dev": true + }, + "combined-stream": { + "version": "0.0.7", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-0.0.7.tgz", + "integrity": "sha1-ATfmV7qlp1QcV6w3rF/AfXO03B8=", + "dev": true, + "optional": true, + "requires": { + "delayed-stream": "0.0.5" + } + }, + "cryptiles": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-0.2.2.tgz", + "integrity": "sha1-7ZH/HxetE9N0gohZT4pIoNJvMlw=", + "dev": true, + "optional": true, + "requires": { + "boom": "0.4.2" + } + }, + "delayed-stream": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-0.0.5.tgz", + "integrity": "sha1-1LH0OpPoKW3+AmlPRoC8N6MTxz8=", + "dev": true, + "optional": true + }, + "forever-agent": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.5.2.tgz", + "integrity": "sha1-bQ4JxJIflKJ/Y9O0nF/v8epMUTA=", + "dev": true + }, + "form-data": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-0.1.4.tgz", + "integrity": "sha1-kavXiKupcCsaq/qLwBAxoqyeOxI=", + "dev": true, + "optional": true, + "requires": { + "async": "0.9.2", + "combined-stream": "0.0.7", + "mime": "1.2.11" + }, + "dependencies": { + "async": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/async/-/async-0.9.2.tgz", + "integrity": "sha1-rqdNXmHB+JlhO/ZL2mbUx48v0X0=", + "dev": true, + "optional": true + } + } + }, + "glob": { + "version": "3.2.11", + "resolved": "https://registry.npmjs.org/glob/-/glob-3.2.11.tgz", + "integrity": "sha1-Spc/Y1uRkPcV0QmH1cAP0oFevj0=", + "dev": true, + "requires": { + "inherits": "2.0.3", + "minimatch": "0.3.0" + }, + "dependencies": { + "minimatch": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-0.3.0.tgz", + "integrity": "sha1-J12O2qxPG7MyZHIInnlJyDlGmd0=", + "dev": true, + "requires": { + "lru-cache": "2.5.2", + "sigmund": "1.0.1" + } + } + } + }, + "graceful-fs": { + "version": "3.0.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-3.0.11.tgz", + "integrity": "sha1-dhPHeKGv6mLyXGMKCG1/Osu92Bg=", + "dev": true, + "requires": { + "natives": "1.1.4" + } + }, + "has-ansi": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-0.1.0.tgz", + "integrity": "sha1-hPJlqujA5qiKEtcCKJS3VoiUxi4=", + "dev": true, + "requires": { + 
"ansi-regex": "0.2.1" + } + }, + "hawk": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/hawk/-/hawk-1.1.1.tgz", + "integrity": "sha1-h81JH5tG5OKurKM1QWdmiF0tHtk=", + "dev": true, + "optional": true, + "requires": { + "boom": "0.4.2", + "cryptiles": "0.2.2", + "hoek": "0.9.1", + "sntp": "0.2.4" + } + }, + "hoek": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/hoek/-/hoek-0.9.1.tgz", + "integrity": "sha1-PTIkYrrfB3Fup+uFuviAec3c5QU=", + "dev": true + }, + "http-signature": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-0.10.1.tgz", + "integrity": "sha1-T72sEyVZqoMjEh5UB3nAoBKyfmY=", + "dev": true, + "optional": true, + "requires": { + "asn1": "0.1.11", + "assert-plus": "0.1.5", + "ctype": "0.5.3" + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "lodash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-0.9.2.tgz", + "integrity": "sha1-jzSZxSRdNG1oLlsNO0B2fgnxqSw=", + "dev": true + }, + "lru-cache": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-2.5.2.tgz", + "integrity": "sha1-H92tk4quEmPOE4aAvhs/WRwKtBw=", + "dev": true + }, + "mime": { + "version": "1.2.11", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.2.11.tgz", + "integrity": "sha1-WCA+7Ybjpe8XrtK32evUfwpg3RA=", + "dev": true, + "optional": true + }, + "mime-types": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-1.0.2.tgz", + "integrity": "sha1-mVrhOSq4r/y/yyZB3QVOlDwNXc4=", + "dev": true + }, + "minimatch": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-1.0.0.tgz", + "integrity": "sha1-4N0hILSeG3JM6NcUxSCCKpQ4V20=", + "dev": true, + "requires": { + "lru-cache": "2.5.2", + "sigmund": "1.0.1" + } + }, + "mkdirp": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.3.5.tgz", + "integrity": "sha1-3j5fiWHIjHh+4TaN+EmsRBPsqNc=", + "dev": true + }, + "node-uuid": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/node-uuid/-/node-uuid-1.4.8.tgz", + "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=", + "dev": true + }, + "oauth-sign": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.4.0.tgz", + "integrity": "sha1-8ilW8x6nFRqCHl8vsywRPK2Ln2k=", + "dev": true, + "optional": true + }, + "qs": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-1.2.2.tgz", + "integrity": "sha1-GbV/8k3CqZzh+L32r82ln472H4g=", + "dev": true + }, + "readable-stream": { + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "request": { + "version": "2.42.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.42.0.tgz", + "integrity": "sha1-VyvQFIk4VkBArHqxSLlkI6BjMEo=", + "dev": true, + "requires": { + "aws-sign2": "0.5.0", + "bl": "0.9.5", + "caseless": "0.6.0", + "forever-agent": "0.5.2", + "form-data": "0.1.4", + "hawk": "1.1.1", + "http-signature": "0.10.1", + "json-stringify-safe": "5.0.1", + "mime-types": "1.0.2", + "node-uuid": "1.4.8", + "oauth-sign": "0.4.0", + "qs": "1.2.2", + "stringstream": 
"0.0.6", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.4.3" + } + }, + "rimraf": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.2.8.tgz", + "integrity": "sha1-5Dm+Kq7jJzIZUnMPmaiSnk/FBYI=", + "dev": true + }, + "semver": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-2.3.2.tgz", + "integrity": "sha1-uYSPJdbPNjMwc+ye+IVtQvEjPlI=", + "dev": true + }, + "sntp": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/sntp/-/sntp-0.2.4.tgz", + "integrity": "sha1-+4hfGLDzqtGJ+CSGJTa87ux1CQA=", + "dev": true, + "optional": true, + "requires": { + "hoek": "0.9.1" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + }, + "strip-ansi": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.3.0.tgz", + "integrity": "sha1-JfSOoiynkYfzF0pNuHWTR7sSYiA=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "supports-color": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-0.2.0.tgz", + "integrity": "sha1-2S3iaU6z9nMjlz1649i1W0wiGQo=", + "dev": true + }, + "tmp": { + "version": "0.0.23", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.23.tgz", + "integrity": "sha1-3odKpel0qF8KMs39vXRmPLO9nHQ=", + "dev": true + }, + "tunnel-agent": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.3.tgz", + "integrity": "sha1-Y3PbdpCf5XDgjXNYM2Xtgop07us=", + "dev": true + } + } + }, + "karma-phantomjs-launcher": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/karma-phantomjs-launcher/-/karma-phantomjs-launcher-1.0.4.tgz", + "integrity": "sha1-0jyjSAG9qYY60xjju0vUBisTrNI=", + "dev": true, + "requires": { + "lodash": "4.17.10", + "phantomjs-prebuilt": "2.1.16" + } + }, + "kew": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/kew/-/kew-0.7.0.tgz", + "integrity": "sha1-edk9LTM2PW/dKXCzNdkUGtWR15s=", + "dev": true + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "dev": true, + "requires": { + "is-buffer": "1.1.6" + } + }, + "klaw": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/klaw/-/klaw-1.3.1.tgz", + "integrity": "sha1-QIhDO0azsbolnXh4XY6W9zugJDk=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11" + } + }, + "latest-version": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-0.2.0.tgz", + "integrity": "sha1-ra+JjV8iOA0/nEU4bv3/ChtbdQE=", + "dev": true, + "requires": { + "package-json": "0.2.0" + } + }, + "levn": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "dev": true, + "optional": true, + "requires": { + "prelude-ls": "1.1.2", + "type-check": "0.3.2" + } + }, + "libbase64": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/libbase64/-/libbase64-0.1.0.tgz", + "integrity": "sha1-YjUag5VjrF/1vSbxL2Dpgwu3UeY=", + "dev": true + }, + "libmime": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/libmime/-/libmime-3.0.0.tgz", + "integrity": "sha1-UaGp50SOy9Ms2lRCFnW7IbwJPaY=", + "dev": true, + "requires": { + "iconv-lite": "0.4.15", + "libbase64": "0.1.0", + "libqp": "1.1.0" + }, + 
"dependencies": { + "iconv-lite": { + "version": "0.4.15", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.15.tgz", + "integrity": "sha1-/iZaIYrGpXz+hUkn6dBMGYJe3es=", + "dev": true + } + } + }, + "libqp": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/libqp/-/libqp-1.1.0.tgz", + "integrity": "sha1-9ebgatdLeU+1tbZpiL9yjvHe2+g=", + "dev": true + }, + "lockfile": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/lockfile/-/lockfile-1.0.4.tgz", + "integrity": "sha512-cvbTwETRfsFh4nHsL1eGWapU1XFi5Ot9E85sWAwia7Y7EgB7vfqcZhTKZ+l7hCGxSPoushMv5GKhT5PdLv03WA==", + "dev": true, + "requires": { + "signal-exit": "3.0.2" + } + }, + "lodash": { + "version": "4.17.10", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz", + "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg==", + "dev": true + }, + "lodash._isnative": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash._isnative/-/lodash._isnative-2.4.1.tgz", + "integrity": "sha1-PqZAS3hKe+g2x7V1gOHN95sUgyw=", + "dev": true + }, + "lodash._objecttypes": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash._objecttypes/-/lodash._objecttypes-2.4.1.tgz", + "integrity": "sha1-fAt/admKH3ZSn4kLDNsbTf7BHBE=", + "dev": true + }, + "lodash.debounce": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-2.4.1.tgz", + "integrity": "sha1-2M6tJG7EuSbouFZ4/Dlr/rqMxvw=", + "dev": true, + "requires": { + "lodash.isfunction": "2.4.1", + "lodash.isobject": "2.4.1", + "lodash.now": "2.4.1" + } + }, + "lodash.isfunction": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-2.4.1.tgz", + "integrity": "sha1-LP1XXHPkmKtX4xm3f6Aq3vE6lNE=", + "dev": true + }, + "lodash.isobject": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash.isobject/-/lodash.isobject-2.4.1.tgz", + "integrity": "sha1-Wi5H/mmVPx7mMafrof5k0tBlWPU=", + "dev": true, + "requires": { + "lodash._objecttypes": "2.4.1" + } + }, + "lodash.now": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/lodash.now/-/lodash.now-2.4.1.tgz", + "integrity": "sha1-aHIVZQBSUYX6+WeFu3/n/hW1YsY=", + "dev": true, + "requires": { + "lodash._isnative": "2.4.1" + } + }, + "log4js": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/log4js/-/log4js-2.7.0.tgz", + "integrity": "sha512-FyTwaPJfbfiK2AHc9ct/oFHNN4bJj0IQeqdO/LaDHhfjeBi8fnZU5rPcHOZhkYV0Aes31Ow+St1YTCluPtzs5g==", + "dev": true, + "requires": { + "amqplib": "0.5.2", + "axios": "0.15.3", + "circular-json": "0.5.4", + "date-format": "1.2.0", + "debug": "3.1.0", + "hipchat-notifier": "1.1.0", + "loggly": "1.1.1", + "mailgun-js": "0.18.0", + "nodemailer": "2.7.2", + "redis": "2.8.0", + "semver": "5.5.0", + "slack-node": "0.2.0", + "streamroller": "0.7.0" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "loggly": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/loggly/-/loggly-1.1.1.tgz", + "integrity": "sha1-Cg/B0/o6XsRP3HuJe+uipGlc6+4=", + "dev": true, + "optional": true, + "requires": { + "json-stringify-safe": "5.0.1", + "request": "2.75.0", + "timespan": "2.3.0" + }, + "dependencies": { + 
"assert-plus": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-0.2.0.tgz", + "integrity": "sha1-104bh+ev/A24qttwIfP+SBAasjQ=", + "dev": true, + "optional": true + }, + "aws-sign2": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.6.0.tgz", + "integrity": "sha1-FDQt0428yU0OW4fXY81jYSwOeU8=", + "dev": true, + "optional": true + }, + "caseless": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.11.0.tgz", + "integrity": "sha1-cVuW6phBWTzDMGeSP17GDr2k99c=", + "dev": true, + "optional": true + }, + "form-data": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.0.0.tgz", + "integrity": "sha1-bwrrrcxdoWwT4ezBETfYX5uIOyU=", + "dev": true, + "optional": true, + "requires": { + "asynckit": "0.4.0", + "combined-stream": "1.0.6", + "mime-types": "2.1.18" + } + }, + "har-validator": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-2.0.6.tgz", + "integrity": "sha1-zcvAgYgmWtEZtqWnyKtw7s+10n0=", + "dev": true, + "optional": true, + "requires": { + "chalk": "1.1.3", + "commander": "2.15.1", + "is-my-json-valid": "2.17.2", + "pinkie-promise": "2.0.1" + } + }, + "http-signature": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.1.1.tgz", + "integrity": "sha1-33LiZwZs0Kxn+3at+OE0qPvPkb8=", + "dev": true, + "optional": true, + "requires": { + "assert-plus": "0.2.0", + "jsprim": "1.4.1", + "sshpk": "1.14.1" + } + }, + "node-uuid": { + "version": "1.4.8", + "resolved": "https://registry.npmjs.org/node-uuid/-/node-uuid-1.4.8.tgz", + "integrity": "sha1-sEDrCSOWivq/jTL7HxfxFn/auQc=", + "dev": true, + "optional": true + }, + "qs": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.2.3.tgz", + "integrity": "sha1-HPyyXBCpsrSDBT/zn138kjOQjP4=", + "dev": true, + "optional": true + }, + "request": { + "version": "2.75.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.75.0.tgz", + "integrity": "sha1-0rgmiihtoT6qXQGt9dGMyQ9lfZM=", + "dev": true, + "optional": true, + "requires": { + "aws-sign2": "0.6.0", + "aws4": "1.7.0", + "bl": "1.1.2", + "caseless": "0.11.0", + "combined-stream": "1.0.6", + "extend": "3.0.1", + "forever-agent": "0.6.1", + "form-data": "2.0.0", + "har-validator": "2.0.6", + "hawk": "3.1.3", + "http-signature": "1.1.1", + "is-typedarray": "1.0.0", + "isstream": "0.1.2", + "json-stringify-safe": "5.0.1", + "mime-types": "2.1.18", + "node-uuid": "1.4.8", + "oauth-sign": "0.8.2", + "qs": "6.2.3", + "stringstream": "0.0.6", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.4.3" + } + }, + "tunnel-agent": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.4.3.tgz", + "integrity": "sha1-Y3PbdpCf5XDgjXNYM2Xtgop07us=", + "dev": true, + "optional": true + } + } + }, + "lru-cache": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-4.1.3.tgz", + "integrity": "sha512-fFEhvcgzuIoJVUF8fYr5KR0YqxD238zgObTps31YdADwPPAp82a4M8TrckkWyx7ekNlf9aBcVn81cFwwXngrJA==", + "dev": true, + "optional": true, + "requires": { + "pseudomap": "1.0.2", + "yallist": "2.1.2" + } + }, + "lru-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/lru-queue/-/lru-queue-0.1.0.tgz", + "integrity": "sha1-Jzi9nw089PhEkMVzbEhpmsYyzaM=", + "dev": true, + "requires": { + "es5-ext": "0.10.44" + } + }, + "mailcomposer": { + "version": "4.0.1", + 
"resolved": "https://registry.npmjs.org/mailcomposer/-/mailcomposer-4.0.1.tgz", + "integrity": "sha1-DhxEsqB890DuF9wUm6AJ8Zyt/rQ=", + "dev": true, + "optional": true, + "requires": { + "buildmail": "4.0.1", + "libmime": "3.0.0" + } + }, + "mailgun-js": { + "version": "0.18.0", + "resolved": "https://registry.npmjs.org/mailgun-js/-/mailgun-js-0.18.0.tgz", + "integrity": "sha512-o0P6jjZlx5CQj12tvVgDTbgjTqVN0+5h6/6P1+3c6xmozVKBwniQ6Qt3MkCSF0+ueVTbobAfWyGpWRZMJu8t1g==", + "dev": true, + "optional": true, + "requires": { + "async": "2.6.1", + "debug": "3.1.0", + "form-data": "2.3.2", + "inflection": "1.12.0", + "is-stream": "1.1.0", + "path-proxy": "1.0.0", + "promisify-call": "2.0.4", + "proxy-agent": "3.0.0", + "tsscmp": "1.0.5" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "optional": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "math-random": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz", + "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w=", + "dev": true + }, + "media-typer": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=", + "dev": true + }, + "memoizee": { + "version": "0.3.10", + "resolved": "https://registry.npmjs.org/memoizee/-/memoizee-0.3.10.tgz", + "integrity": "sha1-TsoNiu057J0Bf0xcLy9kMvQuXI8=", + "dev": true, + "requires": { + "d": "0.1.1", + "es5-ext": "0.10.44", + "es6-weak-map": "0.1.4", + "event-emitter": "0.3.5", + "lru-queue": "0.1.0", + "next-tick": "0.2.2", + "timers-ext": "0.1.5" + }, + "dependencies": { + "next-tick": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-0.2.2.tgz", + "integrity": "sha1-ddpKkn7liH45BliABltzNkE7MQ0=", + "dev": true + } + } + }, + "micromatch": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", + "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", + "dev": true, + "requires": { + "arr-diff": "2.0.0", + "array-unique": "0.2.1", + "braces": "1.8.5", + "expand-brackets": "0.1.5", + "extglob": "0.3.2", + "filename-regex": "2.0.1", + "is-extglob": "1.0.0", + "is-glob": "2.0.1", + "kind-of": "3.2.2", + "normalize-path": "2.1.1", + "object.omit": "2.0.1", + "parse-glob": "3.0.4", + "regex-cache": "0.4.4" + } + }, + "mime": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", + "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==", + "dev": true + }, + "mime-db": { + "version": "1.33.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", + "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==", + "dev": true + }, + "mime-types": { + "version": "2.1.18", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", + "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", + "dev": true, + "requires": { + "mime-db": "1.33.0" + } + }, + "minimatch": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": 
"sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "dev": true, + "requires": { + "brace-expansion": "1.1.11" + } + }, + "minimist": { + "version": "0.0.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=", + "dev": true + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "dev": true, + "requires": { + "minimist": "0.0.8" + } + }, + "mkpath": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/mkpath/-/mkpath-0.1.0.tgz", + "integrity": "sha1-dVSm+Nhxg0zJe1RisSLEwSTW3pE=", + "dev": true + }, + "mout": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/mout/-/mout-0.9.1.tgz", + "integrity": "sha1-hPDz/WrMcxf2PeKv/cwM7gCbBHc=", + "dev": true + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", + "dev": true + }, + "mute-stream": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.4.tgz", + "integrity": "sha1-qSGZYKbV1dBGWXruUSUsZlX3F34=", + "dev": true + }, + "nan": { + "version": "2.10.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.10.0.tgz", + "integrity": "sha512-bAdJv7fBLhWC+/Bls0Oza+mvTaNQtP+1RyhhhvD95pgUJz6XM5IzgmxOkItJ9tkoCiplvAnXI1tNmmUD/eScyA==", + "dev": true, + "optional": true + }, + "natives": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/natives/-/natives-1.1.4.tgz", + "integrity": "sha512-Q29yeg9aFKwhLVdkTAejM/HvYG0Y1Am1+HUkFQGn5k2j8GS+v60TVmZh6nujpEAj/qql+wGUrlryO8bF+b1jEg==", + "dev": true + }, + "negotiator": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz", + "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=", + "dev": true + }, + "netmask": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/netmask/-/netmask-1.0.6.tgz", + "integrity": "sha1-ICl+idhvb2QA8lDZ9Pa0wZRfzTU=", + "dev": true, + "optional": true + }, + "next-tick": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/next-tick/-/next-tick-1.0.0.tgz", + "integrity": "sha1-yobR/ogoFpsBICCOPchCS524NCw=", + "dev": true + }, + "node-fs": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/node-fs/-/node-fs-0.1.7.tgz", + "integrity": "sha1-MjI8zLRsn78PwRgS1FAhzDHTJbs=", + "dev": true + }, + "nodemailer": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/nodemailer/-/nodemailer-2.7.2.tgz", + "integrity": "sha1-8kLmSa7q45tsftdA73sGHEBNMPk=", + "dev": true, + "optional": true, + "requires": { + "libmime": "3.0.0", + "mailcomposer": "4.0.1", + "nodemailer-direct-transport": "3.3.2", + "nodemailer-shared": "1.1.0", + "nodemailer-smtp-pool": "2.8.2", + "nodemailer-smtp-transport": "2.7.2", + "socks": "1.1.9" + }, + "dependencies": { + "socks": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/socks/-/socks-1.1.9.tgz", + "integrity": "sha1-Yo1+TQSRJDVEWsC25Fk3bLPm1pE=", + "dev": true, + "optional": true, + "requires": { + "ip": "1.1.5", + "smart-buffer": "1.1.15" + } + } + } + }, + "nodemailer-direct-transport": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/nodemailer-direct-transport/-/nodemailer-direct-transport-3.3.2.tgz", + "integrity": "sha1-6W+vuQNYVglH5WkBfZfmBzilCoY=", + "dev": true, + "optional": true, + "requires": { + "nodemailer-shared": 
"1.1.0", + "smtp-connection": "2.12.0" + } + }, + "nodemailer-fetch": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/nodemailer-fetch/-/nodemailer-fetch-1.6.0.tgz", + "integrity": "sha1-ecSQihwPXzdbc/6IjamCj23JY6Q=", + "dev": true + }, + "nodemailer-shared": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/nodemailer-shared/-/nodemailer-shared-1.1.0.tgz", + "integrity": "sha1-z1mU4v0mjQD1zw+nZ6CBae2wfsA=", + "dev": true, + "requires": { + "nodemailer-fetch": "1.6.0" + } + }, + "nodemailer-smtp-pool": { + "version": "2.8.2", + "resolved": "https://registry.npmjs.org/nodemailer-smtp-pool/-/nodemailer-smtp-pool-2.8.2.tgz", + "integrity": "sha1-LrlNbPhXgLG0clzoU7nL1ejajHI=", + "dev": true, + "optional": true, + "requires": { + "nodemailer-shared": "1.1.0", + "nodemailer-wellknown": "0.1.10", + "smtp-connection": "2.12.0" + } + }, + "nodemailer-smtp-transport": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/nodemailer-smtp-transport/-/nodemailer-smtp-transport-2.7.2.tgz", + "integrity": "sha1-A9ccdjFPFKx9vHvwM6am0W1n+3c=", + "dev": true, + "optional": true, + "requires": { + "nodemailer-shared": "1.1.0", + "nodemailer-wellknown": "0.1.10", + "smtp-connection": "2.12.0" + } + }, + "nodemailer-wellknown": { + "version": "0.1.10", + "resolved": "https://registry.npmjs.org/nodemailer-wellknown/-/nodemailer-wellknown-0.1.10.tgz", + "integrity": "sha1-WG24EB2zDLRDjrVGc3pBqtDPE9U=", + "dev": true + }, + "nopt": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-2.1.2.tgz", + "integrity": "sha1-bMzZd7gBMqB3MdbozljCyDA8+a8=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + }, + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "dev": true, + "requires": { + "remove-trailing-separator": "1.1.0" + } + }, + "npmconf": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/npmconf/-/npmconf-2.1.3.tgz", + "integrity": "sha512-iTK+HI68GceCoGOHAQiJ/ik1iDfI7S+cgyG8A+PP18IU3X83kRhQIRhAUNj4Bp2JMx6Zrt5kCiozYa9uGWTjhA==", + "dev": true, + "requires": { + "config-chain": "1.1.11", + "inherits": "2.0.3", + "ini": "1.3.5", + "mkdirp": "0.5.1", + "nopt": "3.0.6", + "once": "1.3.3", + "osenv": "0.1.0", + "safe-buffer": "5.1.2", + "semver": "4.3.6", + "uid-number": "0.0.5" + }, + "dependencies": { + "nopt": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-3.0.6.tgz", + "integrity": "sha1-xkZdvwirzU2zWTF/eaxopkayj/k=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + }, + "once": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/once/-/once-1.3.3.tgz", + "integrity": "sha1-suJhVXzkwxTsgwTz+oJmPkKXyiA=", + "dev": true, + "requires": { + "wrappy": "1.0.2" + } + }, + "semver": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/semver/-/semver-4.3.6.tgz", + "integrity": "sha1-MAvG4OhjdPe6YQaLWx7NV/xlMto=", + "dev": true + } + } + }, + "oauth-sign": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.8.2.tgz", + "integrity": "sha1-Rqarfwrq2N6unsBWV4C31O/rnUM=", + "dev": true + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", + "dev": true + }, + "object-component": { + "version": "0.0.3", + "resolved": 
"https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz", + "integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE=", + "dev": true + }, + "object.omit": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", + "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", + "dev": true, + "requires": { + "for-own": "0.1.5", + "is-extendable": "0.1.1" + } + }, + "on-finished": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", + "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "dev": true, + "requires": { + "ee-first": "1.1.1" + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "dev": true, + "requires": { + "wrappy": "1.0.2" + } + }, + "opn": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/opn/-/opn-1.0.2.tgz", + "integrity": "sha1-uQlkM0bQChq8l3qLlvPOPFPVz18=", + "dev": true + }, + "optimist": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz", + "integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=", + "dev": true, + "requires": { + "minimist": "0.0.8", + "wordwrap": "0.0.3" + }, + "dependencies": { + "wordwrap": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", + "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=", + "dev": true + } + } + }, + "optionator": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", + "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", + "dev": true, + "optional": true, + "requires": { + "deep-is": "0.1.3", + "fast-levenshtein": "2.0.6", + "levn": "0.3.0", + "prelude-ls": "1.1.2", + "type-check": "0.3.2", + "wordwrap": "1.0.0" + } + }, + "os-name": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/os-name/-/os-name-1.0.3.tgz", + "integrity": "sha1-GzefZINa98Wn9JizV8uVIVwVnt8=", + "dev": true, + "requires": { + "osx-release": "1.1.0", + "win-release": "1.1.1" + } + }, + "os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=", + "dev": true + }, + "osenv": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/osenv/-/osenv-0.1.0.tgz", + "integrity": "sha1-YWaBIe7FhJVQMLn0cLHSMJUEv8s=", + "dev": true + }, + "osx-release": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/osx-release/-/osx-release-1.1.0.tgz", + "integrity": "sha1-8heRGigTaUmvG/kwiyQeJzfTzWw=", + "dev": true, + "requires": { + "minimist": "1.2.0" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=", + "dev": true + } + } + }, + "p-throttler": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/p-throttler/-/p-throttler-0.1.0.tgz", + "integrity": "sha1-GxaQeULDM+bx3eq8s0eSBLjEF8Q=", + "dev": true, + "requires": { + "q": "0.9.7" + }, + "dependencies": { + "q": { + "version": "0.9.7", + "resolved": "https://registry.npmjs.org/q/-/q-0.9.7.tgz", + "integrity": "sha1-TeLmyzspCIyeTLwDv51C+5bOL3U=", + "dev": true + } + } + }, + "pac-proxy-agent": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/pac-proxy-agent/-/pac-proxy-agent-2.0.2.tgz", + "integrity": 
"sha512-cDNAN1Ehjbf5EHkNY5qnRhGPUCp6SnpyVof5fRzN800QV1Y2OkzbH9rmjZkbBRa8igof903yOnjIl6z0SlAhxA==", + "dev": true, + "optional": true, + "requires": { + "agent-base": "4.2.0", + "debug": "3.1.0", + "get-uri": "2.0.2", + "http-proxy-agent": "2.1.0", + "https-proxy-agent": "2.2.1", + "pac-resolver": "3.0.0", + "raw-body": "2.3.3", + "socks-proxy-agent": "3.0.1" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "optional": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "pac-resolver": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pac-resolver/-/pac-resolver-3.0.0.tgz", + "integrity": "sha512-tcc38bsjuE3XZ5+4vP96OfhOugrX+JcnpUbhfuc4LuXBLQhoTthOstZeoQJBDnQUDYzYmdImKsbz0xSl1/9qeA==", + "dev": true, + "optional": true, + "requires": { + "co": "4.6.0", + "degenerator": "1.0.4", + "ip": "1.1.5", + "netmask": "1.0.6", + "thunkify": "2.1.2" + } + }, + "package-json": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/package-json/-/package-json-0.2.0.tgz", + "integrity": "sha1-Axbhd7jrFJmF009wa0pVQ7J0vsU=", + "dev": true, + "requires": { + "got": "0.3.0", + "registry-url": "0.1.1" + } + }, + "parse-glob": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", + "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", + "dev": true, + "requires": { + "glob-base": "0.3.0", + "is-dotfile": "1.0.3", + "is-extglob": "1.0.0", + "is-glob": "2.0.1" + } + }, + "parseqs": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz", + "integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=", + "dev": true, + "requires": { + "better-assert": "1.0.2" + } + }, + "parseuri": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz", + "integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=", + "dev": true, + "requires": { + "better-assert": "1.0.2" + } + }, + "parseurl": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz", + "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=", + "dev": true + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", + "dev": true + }, + "path-proxy": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/path-proxy/-/path-proxy-1.0.0.tgz", + "integrity": "sha1-GOijaFn8nS8aU7SN7hOFQ8Ag3l4=", + "dev": true, + "optional": true, + "requires": { + "inflection": "1.3.8" + }, + "dependencies": { + "inflection": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/inflection/-/inflection-1.3.8.tgz", + "integrity": "sha1-y9Fg2p91sUw8xjV41POWeEvzAU4=", + "dev": true, + "optional": true + } + } + }, + "pend": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", + "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", + "dev": true + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", + "dev": true + }, + "phantomjs-prebuilt": { + "version": "2.1.16", + "resolved": "https://registry.npmjs.org/phantomjs-prebuilt/-/phantomjs-prebuilt-2.1.16.tgz", + "integrity": 
"sha1-79ISpKOWbTZHaE6ouniFSb4q7+8=", + "dev": true, + "requires": { + "es6-promise": "4.2.4", + "extract-zip": "1.6.7", + "fs-extra": "1.0.0", + "hasha": "2.2.0", + "kew": "0.7.0", + "progress": "1.1.8", + "request": "2.87.0", + "request-progress": "2.0.1", + "which": "1.3.1" + }, + "dependencies": { + "request-progress": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-2.0.1.tgz", + "integrity": "sha1-XTa7V5YcZzqlt4jbyBQf3yO0Tgg=", + "dev": true, + "requires": { + "throttleit": "1.0.0" + } + }, + "throttleit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", + "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", + "dev": true + }, + "which": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz", + "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==", + "dev": true, + "requires": { + "isexe": "2.0.0" + } + } + } + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=", + "dev": true + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "dev": true, + "requires": { + "pinkie": "2.0.4" + } + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=", + "dev": true + }, + "preserve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", + "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=", + "dev": true + }, + "process-nextick-args": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", + "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==", + "dev": true + }, + "progress": { + "version": "1.1.8", + "resolved": "https://registry.npmjs.org/progress/-/progress-1.1.8.tgz", + "integrity": "sha1-4mDHj2Fhzdmw5WzD4Khd4Xx6V74=", + "dev": true + }, + "promisify-call": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/promisify-call/-/promisify-call-2.0.4.tgz", + "integrity": "sha1-1IwtRWUszM1SgB3ey9UzptS9X7o=", + "dev": true, + "optional": true, + "requires": { + "with-callback": "1.0.2" + } + }, + "promptly": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/promptly/-/promptly-0.2.0.tgz", + "integrity": "sha1-c+8gD6gynV06jfQXmJULhkbKRtk=", + "dev": true, + "requires": { + "read": "1.0.7" + } + }, + "proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": "sha1-IS1b/hMYMGpCD2QCuOJv85ZHqEk=", + "dev": true + }, + "proxy-agent": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/proxy-agent/-/proxy-agent-3.0.0.tgz", + "integrity": "sha512-g6n6vnk8fRf705ShN+FEXFG/SDJaW++lSs0d9KaJh4uBWW/wi7en4Cpo5VYQW3SZzAE121lhB/KLQrbURoubZw==", + "dev": true, + "optional": true, + "requires": { + "agent-base": "4.2.0", + "debug": "3.1.0", + "http-proxy-agent": "2.1.0", + "https-proxy-agent": "2.2.1", + "lru-cache": "4.1.3", + "pac-proxy-agent": "2.0.2", + "proxy-from-env": "1.0.0", + "socks-proxy-agent": "3.0.1" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "optional": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "proxy-from-env": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.0.0.tgz", + "integrity": "sha1-M8UDmPcOp+uW0h97gXYwpVeRx+4=", + "dev": true, + "optional": true + }, + "pseudomap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/pseudomap/-/pseudomap-1.0.2.tgz", + "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=", + "dev": true, + "optional": true + }, + "pump": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/pump/-/pump-0.3.5.tgz", + "integrity": "sha1-rl/4wfk+2HrcZTCpdWWxJvWFRUs=", + "dev": true, + "requires": { + "end-of-stream": "1.0.0", + "once": "1.2.0" + }, + "dependencies": { + "once": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.2.0.tgz", + "integrity": "sha1-3hkFxjavh0qPuoYtmqvd0fkgRhw=", + "dev": true + } + } + }, + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=", + "dev": true + }, + "q": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/q/-/q-1.0.1.tgz", + "integrity": "sha1-EYcq7t7okmgRCxCnGESP+xARKhQ=", + "dev": true + }, + "qjobs": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/qjobs/-/qjobs-1.2.0.tgz", + "integrity": "sha512-8YOJEHtxpySA3fFDyCRxA+UUV+fA+rTWnuWvylOK/NCjhY+b4ocCtmu8TtsWb+mYeU+GCHf/S66KZF/AsteKHg==", + "dev": true + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", + "dev": true + }, + "randomatic": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.0.0.tgz", + "integrity": "sha512-VdxFOIEY3mNO5PtSRkkle/hPJDHvQhK21oa73K4yAc9qmp6N429gAyF1gZMOTMeS0/AYzaV/2Trcef+NaIonSA==", + "dev": true, + "requires": { + "is-number": "4.0.0", + "kind-of": "6.0.2", + "math-random": "1.0.1" + }, + "dependencies": { + "is-number": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==", + "dev": true + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==", + "dev": true + } + } + }, + "range-parser": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", + "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=", + "dev": true + }, + "raw-body": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz", + "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==", + "dev": true, + "requires": { + "bytes": "3.0.0", + "http-errors": "1.6.3", + "iconv-lite": "0.4.23", + "unpipe": "1.0.0" + } + }, + "read": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/read/-/read-1.0.7.tgz", + "integrity": "sha1-s9oZvQUkMal2cdRKQmNK33ELQMQ=", + "dev": true, + "requires": { + "mute-stream": "0.0.4" + } + }, + 
"readable-stream": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", + "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "1.0.0", + "process-nextick-args": "2.0.0", + "safe-buffer": "5.1.2", + "string_decoder": "1.1.1", + "util-deprecate": "1.0.2" + } + }, + "readdirp": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.1.0.tgz", + "integrity": "sha1-TtCtBg3zBzMAxIRANz9y0cxkLXg=", + "dev": true, + "requires": { + "graceful-fs": "4.1.11", + "minimatch": "3.0.4", + "readable-stream": "2.3.6", + "set-immediate-shim": "1.0.1" + } + }, + "readline2": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/readline2/-/readline2-0.1.1.tgz", + "integrity": "sha1-mUQ7pug7gw7zBRv9fcJBqCco1Wg=", + "dev": true, + "requires": { + "mute-stream": "0.0.4", + "strip-ansi": "2.0.1" + }, + "dependencies": { + "ansi-regex": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-1.1.1.tgz", + "integrity": "sha1-QchHGUZGN15qGl0Qw8oFTvn8mA0=", + "dev": true + }, + "strip-ansi": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-2.0.1.tgz", + "integrity": "sha1-32LBqpTtLxFOHQ8h/R1QSCt5pg4=", + "dev": true, + "requires": { + "ansi-regex": "1.1.1" + } + } + } + }, + "redeyed": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/redeyed/-/redeyed-0.4.4.tgz", + "integrity": "sha1-N+mQpvKyGyoRwuakj9QTVpjLqX8=", + "dev": true, + "requires": { + "esprima": "1.0.4" + }, + "dependencies": { + "esprima": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-1.0.4.tgz", + "integrity": "sha1-n1V+CPw7TSbs6d00+Pv0drYlha0=", + "dev": true + } + } + }, + "redis": { + "version": "2.8.0", + "resolved": "https://registry.npmjs.org/redis/-/redis-2.8.0.tgz", + "integrity": "sha512-M1OkonEQwtRmZv4tEWF2VgpG0JWJ8Fv1PhlgT5+B+uNq2cA3Rt1Yt/ryoR+vQNOQcIEgdCdfH0jr3bDpihAw1A==", + "dev": true, + "optional": true, + "requires": { + "double-ended-queue": "2.1.0-0", + "redis-commands": "1.3.5", + "redis-parser": "2.6.0" + } + }, + "redis-commands": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/redis-commands/-/redis-commands-1.3.5.tgz", + "integrity": "sha512-foGF8u6MXGFF++1TZVC6icGXuMYPftKXt1FBT2vrfU9ZATNtZJ8duRC5d1lEfE8hyVe3jhelHGB91oB7I6qLsA==", + "dev": true, + "optional": true + }, + "redis-parser": { + "version": "2.6.0", + "resolved": "https://registry.npmjs.org/redis-parser/-/redis-parser-2.6.0.tgz", + "integrity": "sha1-Uu0J2srBCPGmMcB+m2mUHnoZUEs=", + "dev": true, + "optional": true + }, + "regex-cache": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", + "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", + "dev": true, + "requires": { + "is-equal-shallow": "0.1.3" + } + }, + "registry-url": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-0.1.1.tgz", + "integrity": "sha1-FzlCe4GxELMCSCocfNcn/8yC1b4=", + "dev": true, + "requires": { + "npmconf": "2.1.3" + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=", + "dev": 
true + }, + "repeat-element": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.2.tgz", + "integrity": "sha1-7wiaF40Ug7quTZPrmLT55OEdmQo=", + "dev": true + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=", + "dev": true + }, + "request": { + "version": "2.87.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.87.0.tgz", + "integrity": "sha512-fcogkm7Az5bsS6Sl0sibkbhcKsnyon/jV1kF3ajGmF0c8HrttdKTPRT9hieOaQHA5HEq6r8OyWOo/o781C1tNw==", + "dev": true, + "requires": { + "aws-sign2": "0.7.0", + "aws4": "1.7.0", + "caseless": "0.12.0", + "combined-stream": "1.0.6", + "extend": "3.0.1", + "forever-agent": "0.6.1", + "form-data": "2.3.2", + "har-validator": "5.0.3", + "http-signature": "1.2.0", + "is-typedarray": "1.0.0", + "isstream": "0.1.2", + "json-stringify-safe": "5.0.1", + "mime-types": "2.1.18", + "oauth-sign": "0.8.2", + "performance-now": "2.1.0", + "qs": "6.5.2", + "safe-buffer": "5.1.2", + "tough-cookie": "2.3.4", + "tunnel-agent": "0.6.0", + "uuid": "3.2.1" + } + }, + "request-progress": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-0.3.0.tgz", + "integrity": "sha1-vfIGK/wZfF1JJQDUTLOv94ZbSS4=", + "dev": true, + "requires": { + "throttleit": "0.0.2" + } + }, + "request-replay": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/request-replay/-/request-replay-0.2.0.tgz", + "integrity": "sha1-m2k6XRGLOfXFlurV7ZGiZEQFf2A=", + "dev": true, + "requires": { + "retry": "0.6.0" + } + }, + "requestretry": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/requestretry/-/requestretry-1.13.0.tgz", + "integrity": "sha512-Lmh9qMvnQXADGAQxsXHP4rbgO6pffCfuR8XUBdP9aitJcLQJxhp7YZK4xAVYXnPJ5E52mwrfiKQtKonPL8xsmg==", + "dev": true, + "optional": true, + "requires": { + "extend": "3.0.1", + "lodash": "4.17.10", + "request": "2.87.0", + "when": "3.7.8" + } + }, + "requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=", + "dev": true + }, + "retry": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/retry/-/retry-0.6.0.tgz", + "integrity": "sha1-HAEHEyeab9Ho3vKK8MP/GHHKpTc=", + "dev": true + }, + "rimraf": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", + "integrity": "sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w==", + "dev": true, + "requires": { + "glob": "7.1.2" + } + }, + "rx": { + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/rx/-/rx-2.5.3.tgz", + "integrity": "sha1-Ia3H2A8CACr1Da6X/Z2/JIdV9WY=", + "dev": true + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true + }, + "semver": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz", + "integrity": 
"sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==", + "dev": true + }, + "semver-diff": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-0.1.0.tgz", + "integrity": "sha1-T2BXyj66I8xIS1H2Sq+IsTGjhV0=", + "dev": true, + "requires": { + "semver": "2.3.2" + }, + "dependencies": { + "semver": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-2.3.2.tgz", + "integrity": "sha1-uYSPJdbPNjMwc+ye+IVtQvEjPlI=", + "dev": true + } + } + }, + "set-immediate-shim": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/set-immediate-shim/-/set-immediate-shim-1.0.1.tgz", + "integrity": "sha1-SysbJ+uAip+NzEgaWOXlb1mfP2E=", + "dev": true + }, + "setprototypeof": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", + "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==", + "dev": true + }, + "shell-quote": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.4.3.tgz", + "integrity": "sha1-lSxE4LHtkBPvU5WBecxkPod3Rms=", + "dev": true, + "requires": { + "array-filter": "0.0.1", + "array-map": "0.0.0", + "array-reduce": "0.0.0", + "jsonify": "0.0.0" + } + }, + "sigmund": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sigmund/-/sigmund-1.0.1.tgz", + "integrity": "sha1-P/IfGYytIXX587eBhT/ZTQ0ZtZA=", + "dev": true + }, + "signal-exit": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", + "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=", + "dev": true + }, + "slack-node": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/slack-node/-/slack-node-0.2.0.tgz", + "integrity": "sha1-3kuN3aqLeT9h29KTgQT9q/N9+jA=", + "dev": true, + "optional": true, + "requires": { + "requestretry": "1.13.0" + } + }, + "smart-buffer": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-1.1.15.tgz", + "integrity": "sha1-fxFLW2X6s+KjWqd1uxLw0cZJvxY=", + "dev": true + }, + "smtp-connection": { + "version": "2.12.0", + "resolved": "https://registry.npmjs.org/smtp-connection/-/smtp-connection-2.12.0.tgz", + "integrity": "sha1-1275EnyyPCJZ7bHoNJwujV4tdME=", + "dev": true, + "requires": { + "httpntlm": "1.6.1", + "nodemailer-shared": "1.1.0" + } + }, + "sntp": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/sntp/-/sntp-1.0.9.tgz", + "integrity": "sha1-ZUEYTMkK7qbG57NeJlkIJEPGYZg=", + "dev": true, + "optional": true, + "requires": { + "hoek": "2.16.3" + } + }, + "socket.io": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.0.4.tgz", + "integrity": "sha1-waRZDO/4fs8TxyZS8Eb3FrKeYBQ=", + "dev": true, + "requires": { + "debug": "2.6.9", + "engine.io": "3.1.5", + "socket.io-adapter": "1.1.1", + "socket.io-client": "2.0.4", + "socket.io-parser": "3.1.3" + } + }, + "socket.io-adapter": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.1.tgz", + "integrity": "sha1-KoBeihTWNyEk3ZFZrUUC+MsH8Gs=", + "dev": true + }, + "socket.io-client": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.0.4.tgz", + "integrity": "sha1-CRilUkBtxeVAs4Dc2Xr8SmQzL44=", + "dev": true, + "requires": { + "backo2": "1.0.2", + "base64-arraybuffer": "0.1.5", + "component-bind": "1.0.0", + 
"component-emitter": "1.2.1", + "debug": "2.6.9", + "engine.io-client": "3.1.6", + "has-cors": "1.1.0", + "indexof": "0.0.1", + "object-component": "0.0.3", + "parseqs": "0.0.5", + "parseuri": "0.0.5", + "socket.io-parser": "3.1.3", + "to-array": "0.1.4" + } + }, + "socket.io-parser": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.1.3.tgz", + "integrity": "sha512-g0a2HPqLguqAczs3dMECuA1RgoGFPyvDqcbaDEdCWY9g59kdUAz3YRmaJBNKXflrHNwB7Q12Gkf/0CZXfdHR7g==", + "dev": true, + "requires": { + "component-emitter": "1.2.1", + "debug": "3.1.0", + "has-binary2": "1.0.3", + "isarray": "2.0.1" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + }, + "isarray": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz", + "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4=", + "dev": true + } + } + }, + "socks": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/socks/-/socks-1.1.10.tgz", + "integrity": "sha1-W4t/x8jzQcU+0FbpKbe/Tei6e1o=", + "dev": true, + "requires": { + "ip": "1.1.5", + "smart-buffer": "1.1.15" + } + }, + "socks-proxy-agent": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/socks-proxy-agent/-/socks-proxy-agent-3.0.1.tgz", + "integrity": "sha512-ZwEDymm204mTzvdqyUqOdovVr2YRd2NYskrYrF2LXyZ9qDiMAoFESGK8CRphiO7rtbo2Y757k2Nia3x2hGtalA==", + "dev": true, + "requires": { + "agent-base": "4.2.0", + "socks": "1.1.10" + } + }, + "source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", + "dev": true + }, + "sshpk": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.14.1.tgz", + "integrity": "sha1-Ew9Zde3a2WPx1W+SuaxsUfqfg+s=", + "dev": true, + "requires": { + "asn1": "0.2.3", + "assert-plus": "1.0.0", + "bcrypt-pbkdf": "1.0.1", + "dashdash": "1.14.1", + "ecc-jsbn": "0.1.1", + "getpass": "0.1.7", + "jsbn": "0.1.1", + "tweetnacl": "0.14.5" + } + }, + "statuses": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", + "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=", + "dev": true + }, + "streamroller": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/streamroller/-/streamroller-0.7.0.tgz", + "integrity": "sha512-WREzfy0r0zUqp3lGO096wRuUp7ho1X6uo/7DJfTlEi0Iv/4gT7YHqXDjKC2ioVGBZtE8QzsQD9nx1nIuoZ57jQ==", + "dev": true, + "requires": { + "date-format": "1.2.0", + "debug": "3.1.0", + "mkdirp": "0.5.1", + "readable-stream": "2.3.6" + }, + "dependencies": { + "debug": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz", + "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==", + "dev": true, + "requires": { + "ms": "2.0.0" + } + } + } + }, + "string-length": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/string-length/-/string-length-0.1.2.tgz", + "integrity": 
"sha1-qwS7M4Z+50vu1/uJu38InTkngPI=", + "dev": true, + "requires": { + "strip-ansi": "0.2.2" + }, + "dependencies": { + "ansi-regex": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.1.0.tgz", + "integrity": "sha1-Vcpg22kAhXxCOukpeYACb5Qe2QM=", + "dev": true + }, + "strip-ansi": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.2.2.tgz", + "integrity": "sha1-hU0pDJgVJfyMOXqRCwJa4tVP/Ag=", + "dev": true, + "requires": { + "ansi-regex": "0.1.0" + } + } + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "requires": { + "safe-buffer": "5.1.2" + } + }, + "stringify-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-1.0.1.tgz", + "integrity": "sha1-htNefb+86apFY31+zdeEfhWduKI=", + "dev": true + }, + "stringstream": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.6.tgz", + "integrity": "sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA==", + "dev": true + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "dev": true, + "optional": true, + "requires": { + "ansi-regex": "2.1.1" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", + "dev": true, + "optional": true + }, + "tar-fs": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-0.5.2.tgz", + "integrity": "sha1-D1lCS+fu7kUjIxbjAvZtP26m2z4=", + "dev": true, + "requires": { + "mkdirp": "0.5.1", + "pump": "0.3.5", + "tar-stream": "0.4.7" + } + }, + "tar-stream": { + "version": "0.4.7", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-0.4.7.tgz", + "integrity": "sha1-Hx0s6evHtCdlJDyg6PG3v9oKrc0=", + "dev": true, + "requires": { + "bl": "0.9.5", + "end-of-stream": "1.0.0", + "readable-stream": "1.1.14", + "xtend": "4.0.1" + }, + "dependencies": { + "bl": { + "version": "0.9.5", + "resolved": "https://registry.npmjs.org/bl/-/bl-0.9.5.tgz", + "integrity": "sha1-wGt5evCF6gC8Unr8jvzxHeIjIFQ=", + "dev": true, + "requires": { + "readable-stream": "1.0.34" + }, + "dependencies": { + "readable-stream": { + "version": "1.0.34", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz", + "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + } + } + }, + "isarray": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz", + "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=", + "dev": true + }, + "readable-stream": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.1.14.tgz", + "integrity": "sha1-fPTFTvZI44EwhMY23SB54WbAgdk=", + "dev": true, + "requires": { + "core-util-is": "1.0.2", + "inherits": "2.0.3", + "isarray": "0.0.1", + "string_decoder": "0.10.31" + } + }, + "string_decoder": { + "version": "0.10.31", + "resolved": 
"https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz", + "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=", + "dev": true + } + } + }, + "throttleit": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-0.0.2.tgz", + "integrity": "sha1-z+34jmDADdlpe2H90qg0OptoDq8=", + "dev": true + }, + "through": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", + "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=", + "dev": true + }, + "thunkify": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/thunkify/-/thunkify-2.1.2.tgz", + "integrity": "sha1-+qDp0jDFGsyVyhOjYawFyn4EVT0=", + "dev": true, + "optional": true + }, + "timers-ext": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/timers-ext/-/timers-ext-0.1.5.tgz", + "integrity": "sha512-tsEStd7kmACHENhsUPaxb8Jf8/+GZZxyNFQbZD07HQOyooOa6At1rQqjffgvg7n+dxscQa9cjjMdWhJtsP2sxg==", + "dev": true, + "requires": { + "es5-ext": "0.10.44", + "next-tick": "1.0.0" + } + }, + "timespan": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/timespan/-/timespan-2.3.0.tgz", + "integrity": "sha1-SQLOBAvRPYRcj1myfp1ZutbzmSk=", + "dev": true, + "optional": true + }, + "tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "requires": { + "os-tmpdir": "1.0.2" + } + }, + "to-array": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/to-array/-/to-array-0.1.4.tgz", + "integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA=", + "dev": true + }, + "touch": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/touch/-/touch-0.0.2.tgz", + "integrity": "sha1-plp3d5Xly74SmUmb3EIoH/shtfQ=", + "dev": true, + "requires": { + "nopt": "1.0.10" + }, + "dependencies": { + "nopt": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-1.0.10.tgz", + "integrity": "sha1-bd0hvSoxQXuScn3Vhfim83YI6+4=", + "dev": true, + "requires": { + "abbrev": "1.0.9" + } + } + } + }, + "tough-cookie": { + "version": "2.3.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz", + "integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==", + "dev": true, + "requires": { + "punycode": "1.4.1" + } + }, + "traverse": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.3.9.tgz", + "integrity": "sha1-cXuPIgzAu3tE5AUUwisui7xw2Lk=", + "dev": true + }, + "tsscmp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/tsscmp/-/tsscmp-1.0.5.tgz", + "integrity": "sha1-fcSjOvcVgatDN9qR2FylQn69mpc=", + "dev": true, + "optional": true + }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "dev": true, + "requires": { + "safe-buffer": "5.1.2" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "dev": true, + "optional": true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "dev": true, + "requires": { + "prelude-ls": "1.1.2" + } + }, + "type-is": { + "version": 
"1.6.16", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz", + "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==", + "dev": true, + "requires": { + "media-typer": "0.3.0", + "mime-types": "2.1.18" + } + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", + "dev": true + }, + "uglify-js": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-2.3.6.tgz", + "integrity": "sha1-+gmEdwtCi3qbKoBY9GNV0U/vIRo=", + "dev": true, + "optional": true, + "requires": { + "async": "0.2.10", + "optimist": "0.3.7", + "source-map": "0.1.43" + }, + "dependencies": { + "async": { + "version": "0.2.10", + "resolved": "https://registry.npmjs.org/async/-/async-0.2.10.tgz", + "integrity": "sha1-trvgsGdLnXGXCMo43owjfLUmw9E=", + "dev": true, + "optional": true + }, + "optimist": { + "version": "0.3.7", + "resolved": "https://registry.npmjs.org/optimist/-/optimist-0.3.7.tgz", + "integrity": "sha1-yQlBrVnkJzMokjB00s8ufLxuwNk=", + "dev": true, + "optional": true, + "requires": { + "wordwrap": "0.0.3" + } + }, + "source-map": { + "version": "0.1.43", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.1.43.tgz", + "integrity": "sha1-wkvBRspRfBRx9drL4lcbK3+eM0Y=", + "dev": true, + "optional": true, + "requires": { + "amdefine": "1.0.1" + } + }, + "wordwrap": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz", + "integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc=", + "dev": true, + "optional": true + } + } + }, + "uid-number": { + "version": "0.0.5", + "resolved": "https://registry.npmjs.org/uid-number/-/uid-number-0.0.5.tgz", + "integrity": "sha1-Wj2yPvXb1VuB/ODsmirG/M3ruB4=", + "dev": true + }, + "ultron": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz", + "integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og==", + "dev": true + }, + "underscore": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.7.0.tgz", + "integrity": "sha1-a7rwh3UA02vjTsqlhODbn+8DUgk=", + "dev": true + }, + "unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=", + "dev": true + }, + "update-notifier": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-0.2.0.tgz", + "integrity": "sha1-oBDJKK3PAgkLjgzn/vb7Cnysw0o=", + "dev": true, + "requires": { + "chalk": "0.5.1", + "configstore": "0.3.2", + "latest-version": "0.2.0", + "semver-diff": "0.1.0", + "string-length": "0.1.2" + }, + "dependencies": { + "ansi-regex": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-0.2.1.tgz", + "integrity": "sha1-DY6UaWej2BQ/k+JOKYUl/BsiNfk=", + "dev": true + }, + "ansi-styles": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-1.1.0.tgz", + "integrity": "sha1-6uy/Zs1waIJ2Cy9GkVgrj1XXp94=", + "dev": true + }, + "chalk": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-0.5.1.tgz", + "integrity": "sha1-Zjs6ZItotV0EaQ1JFnqoN4WPIXQ=", + "dev": true, + "requires": { + "ansi-styles": "1.1.0", + "escape-string-regexp": "1.0.5", + "has-ansi": "0.1.0", + "strip-ansi": "0.3.0", + "supports-color": 
"0.2.0" + } + }, + "has-ansi": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-0.1.0.tgz", + "integrity": "sha1-hPJlqujA5qiKEtcCKJS3VoiUxi4=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "strip-ansi": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-0.3.0.tgz", + "integrity": "sha1-JfSOoiynkYfzF0pNuHWTR7sSYiA=", + "dev": true, + "requires": { + "ansi-regex": "0.2.1" + } + }, + "supports-color": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-0.2.0.tgz", + "integrity": "sha1-2S3iaU6z9nMjlz1649i1W0wiGQo=", + "dev": true + } + } + }, + "user-home": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/user-home/-/user-home-1.1.1.tgz", + "integrity": "sha1-K1viOjK2Onyd640PKNSFcko98ZA=", + "dev": true + }, + "useragent": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/useragent/-/useragent-2.2.1.tgz", + "integrity": "sha1-z1k+9PLRdYdei7ZY6pLhik/QbY4=", + "dev": true, + "requires": { + "lru-cache": "2.2.4", + "tmp": "0.0.33" + }, + "dependencies": { + "lru-cache": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-2.2.4.tgz", + "integrity": "sha1-bGWGGb7PFAMdDQtZSxYELOTcBj0=", + "dev": true + } + } + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", + "dev": true + }, + "utils-merge": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", + "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=", + "dev": true + }, + "uuid": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.2.1.tgz", + "integrity": "sha512-jZnMwlb9Iku/O3smGWvZhauCf6cvvpKi4BKRiliS3cxnI+Gz9j5MEpTz2UFuXiKPJocb7gnsLHwiS05ige5BEA==", + "dev": true + }, + "uws": { + "version": "9.14.0", + "resolved": "https://registry.npmjs.org/uws/-/uws-9.14.0.tgz", + "integrity": "sha512-HNMztPP5A1sKuVFmdZ6BPVpBQd5bUjNC8EFMFiICK+oho/OQsAJy5hnIx4btMHiOk8j04f/DbIlqnEZ9d72dqg==", + "dev": true, + "optional": true + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "dev": true, + "requires": { + "assert-plus": "1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "1.3.0" + } + }, + "void-elements": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/void-elements/-/void-elements-2.0.1.tgz", + "integrity": "sha1-wGavtYK7HLQSjWDqkjkulNXp2+w=", + "dev": true + }, + "when": { + "version": "3.7.8", + "resolved": "https://registry.npmjs.org/when/-/when-3.7.8.tgz", + "integrity": "sha1-xxMLan6gRpPoQs3J56Hyqjmjn4I=", + "dev": true, + "optional": true + }, + "which": { + "version": "1.0.9", + "resolved": "https://registry.npmjs.org/which/-/which-1.0.9.tgz", + "integrity": "sha1-RgwdoPgQED0DIam2M6+eV15kSG8=", + "dev": true + }, + "win-release": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/win-release/-/win-release-1.1.1.tgz", + "integrity": "sha1-X6VeAr58qTTt/BJmVjLoSbcuUgk=", + "dev": true, + "requires": { + "semver": "5.5.0" + } + }, + "with-callback": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/with-callback/-/with-callback-1.0.2.tgz", + "integrity": "sha1-oJYpuakgAo1yFAT7Q1vc/1yRvCE=", + "dev": true, + "optional": true + }, + "wordwrap": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=", + "dev": true, + "optional": true + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", + "dev": true + }, + "ws": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz", + "integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==", + "dev": true, + "requires": { + "async-limiter": "1.0.0", + "safe-buffer": "5.1.2", + "ultron": "1.1.1" + } + }, + "xdg-basedir": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-1.0.1.tgz", + "integrity": "sha1-FP+PY6T9vLBdW27qIrNvMDO58E4=", + "dev": true, + "requires": { + "user-home": "1.1.1" + } + }, + "xmlhttprequest-ssl": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.5.tgz", + "integrity": "sha1-wodrBhaKrcQOV9l+gRkayPQ5iz4=", + "dev": true + }, + "xregexp": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/xregexp/-/xregexp-2.0.0.tgz", + "integrity": "sha1-UqY+VsoLhKfzpfPWGHLxJq16WUM=", + "dev": true, + "optional": true + }, + "xtend": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", + "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=", + "dev": true + }, + "yallist": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-2.1.2.tgz", + "integrity": "sha1-HBH5IY8HYImkfdUS+TxmmaaoHVI=", + "dev": true, + "optional": true + }, + "yauzl": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.4.1.tgz", + "integrity": "sha1-lSj0QtqxsihOWLQ3m7GU4i4MQAU=", + "dev": true, + "requires": { + "fd-slicer": "1.0.1" + } + }, + "yeast": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/yeast/-/yeast-0.1.2.tgz", + "integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk=", + "dev": true + } + } +} diff --git a/installer/fileserver/html/package.json b/installer/fileserver/html/package.json new file mode 100644 index 0000000000..d2663ff510 --- /dev/null +++ b/installer/fileserver/html/package.json @@ -0,0 +1,22 @@ +{ + "name": "vic-uis-auto-installer", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "karma start" + }, + "author": "", + "license": "ISC", + "devDependencies": { + "jasmine": "^3.1.0", + "jasmine-jquery": "^2.1.1", + "jasmine-spec-reporter": "^4.2.1", + "jquery": "^3.3.1", + "karma": "^2.0.2", + "karma-jasmine": "^1.1.2", + "karma-jasmine-ajax": "^0.1.13", + "karma-jasmine-jquery": "^0.1.1", + "karma-phantomjs-launcher": "^1.0.4" + } +} diff --git a/installer/fileserver/main.go b/installer/fileserver/main.go new file mode 100644 index 0000000000..76e1114b5d --- /dev/null +++ b/installer/fileserver/main.go @@ -0,0 +1,233 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "context" + "crypto/tls" + "flag" + "fmt" + "net/http" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + + log "github.com/Sirupsen/logrus" + + "github.com/vmware/vic-product/installer/fileserver/routes" + "github.com/vmware/vic-product/installer/fileserver/tasks" + "github.com/vmware/vic-product/installer/lib" + "github.com/vmware/vic-product/installer/pkg/ip" + "github.com/vmware/vic/pkg/certificate" + "github.com/vmware/vic/pkg/trace" +) + +type serverConfig struct { + addr string + certPath string + keyPath string + cert tls.Certificate + serveDir string + serverHostname string + admiralPort string + installerPort string + vicTarName string + logLevel string +} + +type serverRoute struct { + route string + handler http.Handler +} + +func parseServerConfig(op trace.Operation, conf *serverConfig) { + ud := syscall.Getuid() + gd := syscall.Getgid() + op.Info(fmt.Sprintf("Current UID/GID = %d/%d", ud, gd)) + /* TODO FIXME + if ud == 0 { + log.Error("Error: must not run as root.") + os.Exit(1) + } + */ + + flag.StringVar(&conf.addr, "addr", ":9443", "Listen address - must include host and port (addr:port)") + flag.StringVar(&conf.certPath, "cert", "", "Path to server certificate in PEM format") + flag.StringVar(&conf.keyPath, "key", "", "Path to server certificate key in PEM format") + flag.StringVar(&conf.serveDir, "dir", "/opt/vmware/fileserver", "Directory to serve and contain html data") + flag.StringVar(&conf.logLevel, "level", "debug", "Set's the log level to [info|debug|warning]; defaults to debug") + flag.Parse() + + routes.SetRenderPath(conf.serveDir) + + switch conf.logLevel { + case "warning": + trace.Logger.Level = log.WarnLevel + case "info": + trace.Logger.Level = log.InfoLevel + default: + trace.Logger.Level = log.DebugLevel + } + + if (conf.certPath == "" && conf.keyPath != "") || (conf.certPath != "" && conf.keyPath == "") { + op.Errorf("Both certificate and key must be specified") + } + + var err error + if conf.certPath != "" { + op.Infof("Loading certificate %s and key %s", conf.certPath, conf.keyPath) + conf.cert, err = tls.LoadX509KeyPair(conf.certPath, conf.keyPath) + if err != nil { + op.Fatalf("Failed to load certificate %s and key %s: %s", conf.certPath, conf.keyPath, err) + } + } else { + op.Info("Generating self signed certificate") + c, k, err := certificate.CreateSelfSigned(conf.addr, []string{"VMware, Inc."}, 2048) + if err != nil { + op.Errorf("Failed to generate a self-signed certificate: %s. Exiting.", err.Error()) + os.Exit(1) + } + conf.cert, err = tls.X509KeyPair(c.Bytes(), k.Bytes()) + if err != nil { + op.Errorf("Failed to load generated self-signed certificate: %s. 
Exiting.", err.Error()) + os.Exit(1) + } + } + op.Infof("Loaded certificate") + + ovf, err := lib.UnmarshaledOvfEnv() + if err != nil { + switch err.(type) { + case lib.EnvFetchError: + op.Fatalf("impossible to fetch ovf environment, exiting") + os.Exit(1) + case lib.UnmarshalError: + op.Errorf("error: %s", err.Error()) + } + } + + if ip, err := ip.FirstIPv4(ip.Eth0Interface); err == nil { + conf.serverHostname = tasks.GetHostname(ovf, ip) + if port, ok := ovf.Properties["management_portal.management_portal_port"]; ok { + conf.admiralPort = port + } + } + + // get the fileserver vic tar location + filepath.Walk("/opt/vmware/fileserver/files/", func(path string, f os.FileInfo, err error) error { + if strings.HasSuffix(path, ".tar.gz") { + conf.vicTarName = f.Name() + return fmt.Errorf("stop") // returning an error stops the file walk + } + return nil // vic tar not found, continue walking + }) +} + +// cspMiddleware sets the Content-Security-Policy header to prevent clickjacking +// https://www.owasp.org/index.php/Content_Security_Policy_Cheat_Sheet#Preventing_Clickjacking +func cspMiddleware() func(next http.Handler) http.Handler { + header := "Content-Security-Policy" + value := "frame-ancestors 'none';" + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add(header, value) + next.ServeHTTP(w, r) + }) + } +} + +func main() { + op := trace.NewOperation(context.Background(), "Main") + var c serverConfig + parseServerConfig(op, &c) + + mux := http.NewServeMux() + + // attach static asset routes + staticAssets := []string{"css", "js", "images", "fonts"} + for _, asset := range staticAssets { + httpPath := fmt.Sprintf("/%s/", asset) + dirPath := filepath.Join(c.serveDir, "/html/", asset) + mux.Handle(httpPath, http.StripPrefix(httpPath, http.FileServer(http.Dir(dirPath)))) + } + + indexRenderer := &routes.IndexHTMLRenderer{ + ServerHostname: c.serverHostname, + ServerAddress: c.addr, + AdmiralPort: c.admiralPort, + VicTarName: c.vicTarName, + } + // attach fileserver route + routes := []*serverRoute{ + {"/plugin/install", http.HandlerFunc(routes.InstallPluginHandler)}, + {"/plugin/remove", http.HandlerFunc(routes.RemovePluginHandler)}, + {"/plugin/upgrade", http.HandlerFunc(routes.UpgradePluginHandler)}, + {"/register", http.HandlerFunc(routes.RegisterHandler)}, + {"/thumbprint", http.HandlerFunc(routes.ThumbprintHandler)}, + {"/files/", http.StripPrefix("/files/", http.FileServer(http.Dir(filepath.Join(c.serveDir, "files"))))}, + {"/", http.HandlerFunc(indexRenderer.IndexHandler)}, + } + + for _, route := range routes { + mux.Handle(route.route, route.handler) + } + + // start the web server + fileserver := &http.Server{ + Addr: c.addr, + Handler: cspMiddleware()(mux), + TLSConfig: lib.GetTLSServerConfig(c.cert), + } + + redirectServer := &http.Server{ + Addr: ":80", + Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + target := "https://" + req.Host + c.addr + req.URL.Path + http.Redirect(w, req, target, http.StatusMovedPermanently) + }), + } + // collect signals and errors to stop the fileserver + signals := make(chan os.Signal, 1) + errors := make(chan error, 1) + + go func() { + // redirect port 80 to 9443 to improve ux on ova + op.Infof("Starting redirect server on %s", redirectServer.Addr) + if err := redirectServer.ListenAndServe(); err != nil { + errors <- err + } + }() + go func() { + op.Infof("Starting fileserver server on %s", fileserver.Addr) + if err := 
fileserver.ListenAndServeTLS("", ""); err != nil { + errors <- err + } + }() + + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) + + select { + case sig := <-signals: + op.Fatalf("signal %s received", sig) + case err := <-errors: + op.Fatalf("error %s received", err) + } + fileserver.Close() + redirectServer.Close() + close(signals) + close(errors) +} diff --git a/installer/fileserver/register.go b/installer/fileserver/register.go deleted file mode 100644 index d9df51f137..0000000000 --- a/installer/fileserver/register.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2017 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "time" - - "github.com/vmware/vic-product/installer/tagvm" -) - -type registerPayload struct { - Target string `json:"target"` - User string `json:"user"` - Password string `json:"password"` - ExternalPSC string `json:"externalpsc"` - PSCDomain string `json:"pscdomain"` -} - -func registerHandler(resp http.ResponseWriter, req *http.Request) { - switch req.Method { - case http.MethodPost: - - if req.Body == nil { - http.Error(resp, "Please send a request body", http.StatusBadRequest) - return - } - - var r registerPayload - err := json.NewDecoder(req.Body).Decode(&r) - if err != nil { - http.Error(resp, err.Error(), http.StatusBadRequest) - return - } - - defer req.Body.Close() - admin.Target = r.Target - admin.User = r.User - admin.Password = r.Password - cancel, err := admin.VerifyLogin() - defer cancel() - if err != nil { - http.Error(resp, err.Error(), http.StatusUnauthorized) - return - } - - ctx := context.TODO() - if err := tagvm.Run(ctx, admin.Validator.Session); err != nil { - http.Error(resp, err.Error(), http.StatusServiceUnavailable) - return - } - - pscInstance = r.ExternalPSC - pscDomain = r.PSCDomain - if err := registerWithPSC(ctx); err != nil { - http.Error(resp, err.Error(), http.StatusServiceUnavailable) - return - } - - if err := ioutil.WriteFile(initServicesTimestamp, []byte(time.Now().String()), 0644); err != nil { - errMsg := fmt.Sprintf("Failed to write to timestamp file: %s", err.Error()) - http.Error(resp, errMsg, http.StatusServiceUnavailable) - return - } - - http.Error(resp, "operation complete", http.StatusOK) - default: - http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) - } - return -} diff --git a/installer/fileserver/routes/index.go b/installer/fileserver/routes/index.go new file mode 100644 index 0000000000..1ff3112b91 --- /dev/null +++ b/installer/fileserver/routes/index.go @@ -0,0 +1,137 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package routes + +import ( + "context" + "fmt" + "net/http" + "os" + + "github.com/vmware/vic-product/installer/fileserver/tasks" + "github.com/vmware/vic/pkg/trace" +) + +// IndexHTMLOptions contains fields for html templating in index.html +type IndexHTMLOptions struct { + InitErrorFeedback string + InitSuccessFeedback string + NeedLogin bool + AdmiralAddr string + FileserverAddr string + ValidationError string +} + +// IndexHTMLRenderer must be populated before the IndexHandler can render correctly +type IndexHTMLRenderer struct { + ServerHostname string + ServerAddress string + AdmiralPort string + VicTarName string +} + +// IndexHandler is an http.Handler for rendering the fileserver Getting Started Page +func (i *IndexHTMLRenderer) IndexHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + op := trace.NewOperation(context.Background(), "IndexHandler") + if rejectRestrictedRequest(op, resp, req) { + return + } + html := &IndexHTMLOptions{ + NeedLogin: needInitializationServices(req), + InitErrorFeedback: "", + InitSuccessFeedback: "", + ValidationError: "", + } + + if req.Method == http.MethodPost { + if err := indexFormHandler(op, req, html); err != nil { + op.Errorf("Install failed: %s", err.Error()) + html.InitErrorFeedback = fmt.Sprintf("Installation failed: %s", err.Error()) + } else if req.FormValue("needuiplugin") == "true" { + html.InitSuccessFeedback = "Installation successful. Refer to the Post-install and Deployment tasks below. All vSphere Client users must log out and log back in again twice to see the vSphere Integrated Containers plug-in." + } else { + html.InitSuccessFeedback = "Installation successful. Refer to the Post-install and Deployment tasks below." 
+ } + } + + html.AdmiralAddr = fmt.Sprintf("https://%s:%s", i.ServerHostname, i.AdmiralPort) + html.FileserverAddr = fmt.Sprintf("https://%s%s/files/%s", i.ServerHostname, i.ServerAddress, i.VicTarName) + + RenderTemplate(op, resp, "html/index.html", html) +} + +// indexFormHandler registers the appliance using post form values +func indexFormHandler(op trace.Operation, req *http.Request, html *IndexHTMLOptions) error { + // verify login + PSCConfig := tasks.NewPSCRegistrationConfig() + PSCConfig.Admin.Target = req.FormValue("target") + PSCConfig.Admin.User = req.FormValue("user") + PSCConfig.Admin.Password = req.FormValue("password") + PSCConfig.Admin.Thumbprint = req.FormValue("thumbprint") + PSCConfig.PscInstance = req.FormValue("psc") + PSCConfig.PscDomain = req.FormValue("pscDomain") + + // VerifyLogin populates Admin.Validator + cancel, err := PSCConfig.Admin.VerifyLogin(op) + defer cancel() + if err != nil { + op.Infof("Validation failed: %s", err.Error()) + html.ValidationError = err.Error() + return err + } + defer PSCConfig.Admin.Session.Logout(op) + + op.Infof("Validation succeeded") + html.NeedLogin = false + + if err := PSCConfig.RegisterAppliance(op); err != nil { + return err + } + + if req.FormValue("needuiplugin") == "true" { + h5 := tasks.NewH5UIPlugin(PSCConfig.Admin) + h5.Force = true + if err := h5.Install(op); err != nil { + return err + } + + flex := tasks.NewFlexUIPlugin(PSCConfig.Admin) + flex.Force = true + if err := flex.Install(op); err != nil { + return err + } + } + + return nil +} + +func needInitializationServices(req *http.Request) bool { + _, err := os.Stat(tasks.InitServicesTimestamp) + return os.IsNotExist(err) || req.URL.Query().Get("login") == "true" +} + +func rejectRestrictedRequest(op trace.Operation, resp http.ResponseWriter, req *http.Request) bool { + paths := map[string]struct{}{ + "/": {}, + "/index.html": {}, + } + if _, ok := paths[req.URL.Path]; !ok { + op.Errorf("Request path %s not found in %-v", req.URL.Path, paths) + http.NotFound(resp, req) + return true + } + return false +} diff --git a/installer/fileserver/routes/plugin.go b/installer/fileserver/routes/plugin.go new file mode 100644 index 0000000000..bb4c24fde4 --- /dev/null +++ b/installer/fileserver/routes/plugin.go @@ -0,0 +1,305 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package routes + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/vmware/vic-product/installer/fileserver/tasks" + "github.com/vmware/vic-product/installer/lib" + "github.com/vmware/vic/pkg/trace" +) + +type pluginPayload struct { + Vc *targetParameters `json:"vc"` + Plugin *pluginParameters `json:"plugin"` + Appliance *applianceParameters `json:"appliance"` +} + +type targetParameters struct { + Target string `json:"target,omitempty"` + User string `json:"user,omitempty"` + Password string `json:"password,omitempty"` + Thumbprint string `json:"thumbprint,omitempty"` +} + +type pluginParameters struct { + // If given, uses the H5 or Flex plugin presets + Preset string `json:"preset,omitempty"` + + // Optional Parameters + Force bool `json:"force,omitempty"` + Insecure bool `json:"insecure,omitempty"` + + // Mandatory Parameters + Configure bool `json:"configure,omitempty"` + Company string `json:"company,omitempty"` + HideInSolutionManager bool `json:"hide,omitempty"` + Key string `json:"key,omitempty"` + Name string `json:"name,omitempty"` + Summary string `json:"summary,omitempty"` + Version string `json:"version,omitempty"` + EntityType string `json:"entityType,omitempty"` +} + +type applianceParameters struct { + Host string `json:"host,omitempty"` + URL string `json:"url,omitempty"` + ServerThumbprint string `json:"thumbprint,omitempty"` +} + +type httpError struct { + ErrorType string `json:"type"` + Title string `json:"title"` + code int +} + +// InstallPluginHandler unwraps a json body as a tasks.Plugin and preforms +// the InstallPlugin task +func InstallPluginHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + + switch req.Method { + case http.MethodPost: + op := trace.NewOperation(context.Background(), "InstallPluginHandler") + if req.Body == nil { + (&httpError{ + Title: "Request body not found.", + code: http.StatusBadRequest, + }).Error(op, resp) + return + } + + plugin, err := decodePluginPayload(op, req) + if err != nil { + op.Errorf("Could not decode plugin payload: %s", err.Error()) + (&httpError{ + Title: "Could not decode body.", + code: http.StatusUnprocessableEntity, + }).Error(op, resp) + return + } + + cancel, err := plugin.Target.VerifyLogin(op) + defer cancel() + if err != nil { + op.Errorf("Could not login to vc: %s", err.Error()) + (&httpError{ + Title: "Error authenticating with vc.", + code: http.StatusUnauthorized, + }).Error(op, resp) + return + } + defer plugin.Target.Session.Logout(op) + + if err := plugin.Install(op); err != nil { + op.Errorf("Could not install plugin: %s", err.Error()) + (&httpError{ + Title: "Error installing plugin.", + code: http.StatusInternalServerError, + }).Error(op, resp) + return + } + + resp.WriteHeader(http.StatusNoContent) + default: + http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) + } +} + +// RemovePluginHandler unwraps a json body as a tasks.Plugin and preforms +// the RemovePlugin task +func RemovePluginHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + + switch req.Method { + case http.MethodPost: + op := trace.NewOperation(context.Background(), "RemovePluginHandler") + if req.Body == nil { + (&httpError{ + Title: "Request body not found.", + code: http.StatusBadRequest, + }).Error(op, resp) + return + } + + plugin, err := decodePluginPayload(op, req) + if err != nil { + op.Errorf("Could not decode plugin payload: %s", err.Error()) + (&httpError{ + Title: "Could not 
decode body.", + code: http.StatusUnprocessableEntity, + }).Error(op, resp) + return + } + + cancel, err := plugin.Target.VerifyLogin(op) + defer cancel() + if err != nil { + op.Errorf("Could not login to vc: %s", err.Error()) + (&httpError{ + Title: "Error authenticating with vc.", + code: http.StatusUnauthorized, + }).Error(op, resp) + return + } + defer plugin.Target.Session.Logout(op) + + if err := plugin.Remove(op); err != nil { + op.Errorf("Could not remove plugin: %s", err.Error()) + (&httpError{ + Title: "Error removing plugin.", + code: http.StatusInternalServerError, + }).Error(op, resp) + return + } + + resp.WriteHeader(http.StatusNoContent) + default: + http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) + } +} + +// UpgradePluginHandler unwraps a json body as a tasks.Plugin and preforms +// the force InstallPlugin task +func UpgradePluginHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + + switch req.Method { + case http.MethodPost: + op := trace.NewOperation(context.Background(), "UpgradePluginHandler") + if req.Body == nil { + (&httpError{ + Title: "Request body not found.", + code: http.StatusBadRequest, + }).Error(op, resp) + return + } + + plugin, err := decodePluginPayload(op, req) + if err != nil { + op.Errorf("Could not decode plugin payload: %s", err.Error()) + (&httpError{ + Title: "Could not decode body.", + code: http.StatusUnprocessableEntity, + }).Error(op, resp) + return + } + + cancel, err := plugin.Target.VerifyLogin(op) + defer cancel() + if err != nil { + op.Errorf("Could not login to vc: %s", err.Error()) + (&httpError{ + Title: "Error authenticating with vc.", + code: http.StatusUnauthorized, + }).Error(op, resp) + return + } + defer plugin.Target.Session.Logout(op) + + plugin.Force = true + + if err := plugin.Install(op); err != nil { + op.Errorf("Could not upgrade plugin: %s", err.Error()) + (&httpError{ + Title: "Error upgrading plugin.", + code: http.StatusInternalServerError, + }).Error(op, resp) + return + } + + resp.WriteHeader(http.StatusNoContent) + default: + http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) + } +} + +func decodePluginPayload(op trace.Operation, req *http.Request) (*tasks.Plugin, error) { + defer trace.End(trace.Begin("")) + + var p pluginPayload + err := json.NewDecoder(req.Body).Decode(&p) + if err != nil { + return nil, err + } + defer req.Body.Close() + + if p.Vc == nil { + return nil, errors.New("Please supply a vCenter target object") + } + + if p.Plugin == nil { + return nil, errors.New("Please supply a Plugin object") + } + + if p.Appliance == nil { + p.Appliance = &applianceParameters{} + } + + loginInfo := &lib.LoginInfo{ + Target: p.Vc.Target, + User: p.Vc.User, + Password: p.Vc.Password, + Thumbprint: p.Vc.Thumbprint, + } + + var plugin *tasks.Plugin + switch p.Plugin.Preset { + case "H5": + plugin = tasks.NewH5UIPlugin(loginInfo) + case "FLEX": + plugin = tasks.NewFlexUIPlugin(loginInfo) + default: + plugin = tasks.NewUIPlugin(loginInfo) + plugin.Configure = p.Plugin.Configure + plugin.Company = p.Plugin.Company + plugin.HideInSolutionManager = p.Plugin.HideInSolutionManager + plugin.Key = p.Plugin.Key + plugin.Name = p.Plugin.Name + plugin.Summary = p.Plugin.Summary + plugin.Version = p.Plugin.Version + plugin.EntityType = p.Plugin.EntityType + } + + plugin.Force = p.Plugin.Force + plugin.Insecure = p.Plugin.Insecure + + plugin.ApplianceHost = p.Appliance.Host + plugin.ApplianceServerThumbprint = p.Appliance.ServerThumbprint + 
plugin.ApplianceURL = p.Appliance.URL + + op.Debugf("Decoded plugin: %#v", plugin) + return plugin, nil +} + +func (e *httpError) Error(op trace.Operation, resp http.ResponseWriter) { + if e.code == 0 { + e.code = http.StatusBadRequest + } + if e.ErrorType == "" { + e.ErrorType = "about:blank" + } + resp.WriteHeader(e.code) + err := json.NewEncoder(resp).Encode(e) + if err != nil { + op.Errorf("Cannot send http error response: %s", err) + fmt.Fprintln(resp, "Error serving json.") + } +} diff --git a/installer/fileserver/routes/register.go b/installer/fileserver/routes/register.go new file mode 100644 index 0000000000..a359e6dbec --- /dev/null +++ b/installer/fileserver/routes/register.go @@ -0,0 +1,84 @@ +// Copyright 2017-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package routes + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + + "github.com/vmware/vic-product/installer/fileserver/tasks" + "github.com/vmware/vic/pkg/trace" +) + +type registerPayload struct { + Target string `json:"target"` + User string `json:"user"` + Password string `json:"password"` + Thumbprint string `json:"thumbprint,omitempty"` + ExternalPSC string `json:"externalpsc"` + PSCDomain string `json:"pscdomain"` +} + +// RegisterHandler unwraps a json body as a PSCRegistrationConfig and preforms +// the RegisterWithPSC task +func RegisterHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + + switch req.Method { + case http.MethodPost: + op := trace.NewOperation(context.Background(), "RegisterHandler") + if req.Body == nil { + http.Error(resp, "Please send a request body", http.StatusBadRequest) + return + } + + var r registerPayload + err := json.NewDecoder(req.Body).Decode(&r) + if err != nil { + http.Error(resp, err.Error(), http.StatusBadRequest) + return + } + defer req.Body.Close() + + PSCConfig := tasks.NewPSCRegistrationConfig() + PSCConfig.Admin.Target = r.Target + PSCConfig.Admin.User = r.User + PSCConfig.Admin.Password = r.Password + PSCConfig.Admin.Thumbprint = r.Thumbprint + cancel, err := PSCConfig.Admin.VerifyLogin(op) + defer cancel() + if err != nil { + op.Infof("Validation failed") + http.Error(resp, err.Error(), http.StatusUnauthorized) + return + } + defer PSCConfig.Admin.Session.Logout(op) + + op.Infof("Validation succeeded") + if err := PSCConfig.RegisterAppliance(op); err != nil { + errMsg := fmt.Sprintf("Failed to write to register appliance: %s", err.Error()) + http.Error(resp, errMsg, http.StatusInternalServerError) + return + } + + resp.WriteHeader(http.StatusOK) + resp.Write([]byte("operation complete")) + default: + http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) + } + return +} diff --git a/installer/fileserver/routes/renderer.go b/installer/fileserver/routes/renderer.go new file mode 100644 index 0000000000..94bd8e18b8 --- /dev/null +++ b/installer/fileserver/routes/renderer.go @@ -0,0 +1,58 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package routes + +import ( + "fmt" + "html/template" + "net/http" + + "github.com/vmware/vic/pkg/trace" +) + +var ( + // Share the renderer between the whole package. Set the rootPath before use if needed. + renderer *templateRenderer +) + +type templateRenderer struct { + rootPath string +} + +func init() { + renderer = &templateRenderer{} +} + +// SetRenderPath sets the render path of the global renderer var +func SetRenderPath(path string) { + renderer.rootPath = path +} + +// RenderTemplate writes a golang html template to an http response +func RenderTemplate(op trace.Operation, resp http.ResponseWriter, filename string, data interface{}) { + defer trace.End(trace.Begin("")) + + op.Infof("render: %s", filename) + filename = fmt.Sprintf("%s/%s", renderer.rootPath, filename) + tmpl, err := template.ParseFiles(filename) + if err != nil { + http.Error(resp, err.Error(), http.StatusInternalServerError) + return + } + if err := tmpl.Execute(resp, data); err != nil { + http.Error(resp, err.Error(), http.StatusInternalServerError) + return + } +} diff --git a/installer/fileserver/routes/thumbprint.go b/installer/fileserver/routes/thumbprint.go new file mode 100644 index 0000000000..2cff89fd95 --- /dev/null +++ b/installer/fileserver/routes/thumbprint.go @@ -0,0 +1,60 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package routes + +import ( + "context" + "crypto/tls" + "net/http" + "net/url" + + "github.com/vmware/govmomi/object" + "github.com/vmware/vic/pkg/trace" +) + +const ( + targetKey = "target" +) + +// ThumbprintHandler returns the thumbprint of the ip/fqdn given by the get parameter targetKey +func ThumbprintHandler(resp http.ResponseWriter, req *http.Request) { + defer trace.End(trace.Begin("")) + + switch req.Method { + case http.MethodPost: + op := trace.NewOperation(context.Background(), "ThumbprintHandler") + target := req.FormValue(targetKey) + if target == "" { + op.Infof("Target not supplied") + http.Error(resp, "Please supply a target", http.StatusUnprocessableEntity) + return + } + + // see https://github.com/vmware/govmomi/blob/master/govc/flags/host_connect.go#L70-L85 + var cert object.HostCertificateInfo + if err := cert.FromURL(&url.URL{Host: target}, &tls.Config{}); err != nil { + op.Errorf("Error getting thumbprint for %s: %s", target, err.Error()) + http.Error(resp, "Error getting thumbprint", http.StatusInternalServerError) + return + } + + op.Infof("Thumbprint found") + resp.WriteHeader(http.StatusOK) + resp.Write([]byte(cert.ThumbprintSHA1)) + default: + http.Error(resp, "only accepts POST", http.StatusMethodNotAllowed) + } + return +} diff --git a/installer/fileserver/server.go b/installer/fileserver/server.go deleted file mode 100644 index 3c965441a2..0000000000 --- a/installer/fileserver/server.go +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2016-2017 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package main - -import ( - "context" - "crypto/tls" - "flag" - "fmt" - "html/template" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - log "github.com/Sirupsen/logrus" - - "github.com/vmware/vic-product/installer/lib" - "github.com/vmware/vic-product/installer/pkg/ip" - "github.com/vmware/vic-product/installer/tagvm" - "github.com/vmware/vic/pkg/certificate" - "github.com/vmware/vic/pkg/errors" - "github.com/vmware/vic/pkg/trace" -) - -type config struct { - addr string - certPath string - keyPath string - cert tls.Certificate - serveDir string - serverHostname string - admiralPort string - installerPort string - vicTarName string - logLevel string -} - -// IndexHTMLOptions contains fields for html templating in index.html -type IndexHTMLOptions struct { - InitErrorFeedback string - InitSuccessFeedback string - NeedLogin bool - AdmiralAddr string - DemoVCHAddr string - FileserverAddr string - ValidationError string -} - -var ( - admin = &lib.LoginInfo{} - c config - - // pscInstance holds the form input for the PSC field - pscInstance string - - // pscDomain holds the form input for the PSC Admin Domain field - pscDomain string -) - -const initServicesTimestamp = "./registration-timestamps.txt" - -func Init(conf *config) { - ud := syscall.Getuid() - gd := syscall.Getgid() - log.Info(fmt.Sprintf("Current UID/GID = %d/%d", ud, gd)) - /* TODO FIXME - if ud == 0 { - log.Error("Error: must not run as root.") - os.Exit(1) - } - */ - - flag.StringVar(&conf.addr, "addr", ":9443", "Listen address - must include host and port (addr:port)") - flag.StringVar(&conf.certPath, "cert", "", "Path to server certificate in PEM format") - flag.StringVar(&conf.keyPath, "key", "", "Path to server certificate key in PEM format") - flag.StringVar(&conf.serveDir, "dir", "/opt/vmware/fileserver", "Directory to serve and contain html data") - flag.StringVar(&conf.logLevel, "level", "debug", "Set's the log level to [info|debug|warning]; defaults to debug") - - flag.Parse() - - switch conf.logLevel { - case "warning": - log.SetLevel(log.WarnLevel) - case "info": - log.SetLevel(log.InfoLevel) - default: - log.SetLevel(log.DebugLevel) - } - - if (conf.certPath == "" && conf.keyPath != "") || (conf.certPath != "" && conf.keyPath == "") { - log.Errorf("Both certificate and key must be specified") - } - - var err error - if conf.certPath != "" { - log.Infof("Loading certificate %s and key %s", conf.certPath, conf.keyPath) - conf.cert, err = tls.LoadX509KeyPair(conf.certPath, conf.keyPath) - if err != nil { - log.Fatalf("Failed to load certificate %s and key %s: %s", conf.certPath, conf.keyPath, err) - } - } else { - log.Info("Generating self signed certificate") - c, k, err := certificate.CreateSelfSigned(conf.addr, []string{"VMware, Inc."}, 2048) - if err != nil { - log.Errorf("Failed to generate a self-signed certificate: %s. Exiting.", err.Error()) - os.Exit(1) - } - conf.cert, err = tls.X509KeyPair(c.Bytes(), k.Bytes()) - if err != nil { - log.Errorf("Failed to load generated self-signed certificate: %s. 
Exiting.", err.Error()) - os.Exit(1) - } - } - log.Infof("Loaded certificate") - - ovf, err := lib.UnmarshaledOvfEnv() - if err != nil { - switch err.(type) { - case lib.EnvFetchError: - log.Fatalf("impossible to fetch ovf environment, exiting") - os.Exit(1) - case lib.UnmarshalError: - log.Errorf("error: %s", err.Error()) - } - } - - if ip, err := ip.FirstIPv4(ip.Eth0Interface); err == nil { - conf.serverHostname = getHostname(ovf, ip) - if port, ok := ovf.Properties["management_portal.management_portal_port"]; ok { - conf.admiralPort = port - } - } - - // get the fileserver vic tar location - filepath.Walk("/opt/vmware/fileserver/files/", func(path string, f os.FileInfo, err error) error { - if strings.HasSuffix(path, ".tar.gz") { - c.vicTarName = f.Name() - return fmt.Errorf("stop") // returning an error stops the file walk - } - return nil // vic tar not found, continue walking - }) -} - -// cspMiddleware sets the Content-Security-Policy header to prevent clickjacking -// https://www.owasp.org/index.php/Content_Security_Policy_Cheat_Sheet#Preventing_Clickjacking -func cspMiddleware() func(next http.Handler) http.Handler { - header := "Content-Security-Policy" - value := "frame-ancestors 'none';" - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add(header, value) - next.ServeHTTP(w, r) - }) - } -} - -func main() { - - Init(&c) - - mux := http.NewServeMux() - - // attach static asset routes - routes := []string{"css", "js", "images", "fonts"} - for _, route := range routes { - httpPath := fmt.Sprintf("/%s/", route) - dirPath := filepath.Join(c.serveDir, "/html/", route) - mux.Handle(httpPath, http.StripPrefix(httpPath, http.FileServer(http.Dir(dirPath)))) - } - - // attach fileserver route - dirPath := filepath.Join(c.serveDir, "files") - mux.Handle("/files/", http.StripPrefix("/files/", http.FileServer(http.Dir(dirPath)))) - - // attach register route, for registration automation - mux.Handle("/register", http.HandlerFunc(registerHandler)) - - // attach root index route - mux.Handle("/", http.HandlerFunc(indexHandler)) - - // start the web server - s := lib.GetTLSServer(c.addr, cspMiddleware()(mux), c.cert) - - log.Infof("Starting fileserver server on %s", s.Addr) - // redirect port 80 to 9443 to improve ux on ova - go http.ListenAndServe(":80", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - target := "https://" + req.Host + c.addr + req.URL.Path - http.Redirect(w, req, target, http.StatusMovedPermanently) - })) - log.Fatal(s.ListenAndServeTLS("", "")) -} - -func indexHandler(resp http.ResponseWriter, req *http.Request) { - defer trace.End(trace.Begin("")) - - html := &IndexHTMLOptions{ - NeedLogin: needInitializationServices(req), - InitErrorFeedback: "", - InitSuccessFeedback: "", - ValidationError: "", - } - - if req.Method == http.MethodPost { - // verify login - admin.Target = req.FormValue("target") - admin.User = req.FormValue("user") - admin.Password = req.FormValue("password") - pscInstance = req.FormValue("psc") - pscDomain = req.FormValue("pscDomain") - cancel, err := admin.VerifyLogin() - defer cancel() - if err != nil { - log.Infof("Validation failed: %s", err.Error()) - html.ValidationError = err.Error() - } else { - log.Infof("Validation succeeded") - html.InitErrorFeedback = startInitializationServices() - // Display success message upon init success. - if html.InitErrorFeedback == "" { - html.InitSuccessFeedback = "Installation successful. 
Refer to the Post-install and Deployment tasks below." - } - - html.NeedLogin = false - } - } - - html.AdmiralAddr = fmt.Sprintf("https://%s:%s", c.serverHostname, c.admiralPort) - html.DemoVCHAddr = fmt.Sprintf("https://%s:%s", c.serverHostname, c.installerPort) - html.FileserverAddr = fmt.Sprintf("https://%s%s/files/%s", c.serverHostname, c.addr, c.vicTarName) - - renderTemplate(resp, "html/index.html", html) -} - -func renderTemplate(resp http.ResponseWriter, filename string, data interface{}) { - defer trace.End(trace.Begin("")) - - log.Infof("render: %s", filename) - filename = fmt.Sprintf("%s/%s", c.serveDir, filename) - log.Infof("render: %s", filename) - tmpl, err := template.ParseFiles(filename) - if err != nil { - http.Error(resp, err.Error(), http.StatusInternalServerError) - } - if err := tmpl.Execute(resp, data); err != nil { - http.Error(resp, err.Error(), http.StatusInternalServerError) - } -} - -// startInitializationServices performs some OVA init tasks - tagging the OVA VM -// registering Admiral with PSC. Errors, if any, are concatenated and returned. -func startInitializationServices() string { - var errorMsg []string - - ctx := context.TODO() - if err := tagvm.Run(ctx, admin.Validator.Session); err != nil { - log.Debug(errors.ErrorStack(err)) - errorMsg = append(errorMsg, "Failed to locate VIC Appliance. Please check the vCenter Server provided and try again") - } - - if err := registerWithPSC(ctx); err != nil { - log.Debug(errors.ErrorStack(err)) - errorMsg = append(errorMsg, "Failed to register with PSC. Please check the PSC settings provided and try again") - } - - if len(errorMsg) == 0 { - err := ioutil.WriteFile(initServicesTimestamp, []byte(time.Now().String()), 0644) - if err != nil { - log.Debug(errors.ErrorStack(err)) - errorMsg = append(errorMsg, "Failed to write to timestamp file: %s", err.Error()) - } - } - return strings.Join(errorMsg, "
") -} - -func needInitializationServices(req *http.Request) bool { - _, err := os.Stat(initServicesTimestamp) - return os.IsNotExist(err) || req.URL.Query().Get("login") == "true" -} diff --git a/installer/fileserver/tasks.go b/installer/fileserver/tasks.go deleted file mode 100644 index 3a2a715a27..0000000000 --- a/installer/fileserver/tasks.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2017 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "context" - "fmt" - "net" - "os/exec" - "strings" - - log "github.com/Sirupsen/logrus" - - "github.com/vmware/vic-product/installer/lib" - "github.com/vmware/vic-product/installer/pkg/ip" - "github.com/vmware/vic/pkg/vsphere/optmanager" -) - -const ( - pscBinaryPath = "/etc/vmware/admiral/admiral-auth-psc-1.3.2-SNAPSHOT-command.jar" - vcHostnameOption = "config.vpxd.hostnameUrl" - pscConfDir = "/etc/vmware/psc" - pscConfFileName = "psc-config.properties" -) - -// registerWithPSC runs the PSC register command to register VIC services with -// the platforms services controller. The command generates config files and -// keystore files to use while getting and renewing tokens. -func registerWithPSC(ctx context.Context) error { - var err error - - // Obtain the admin user's domain - domain := "vsphere.local" - userFields := strings.SplitN(admin.User, "@", 2) - if len(userFields) == 2 { - domain = userFields[1] - } - - if pscInstance == "" { - // Obtain the hostname of the vCenter host to use as PSC instance - pscInstance, err = optmanager.QueryOptionValue(ctx, admin.Validator.Session, vcHostnameOption) - if err != nil { - return err - } - } - if pscDomain != "" { - log.Infof("User domain: %s PSC domain: %s. Using %s", domain, pscDomain, pscDomain) - domain = pscDomain - } - log.Infof("vCenter user: %s", admin.User) - log.Infof("PSC instance: %s", pscInstance) - log.Infof("PSC domain: %s", domain) - - // Obtain the OVA VM's IP - vmIP, err := ip.FirstIPv4(ip.Eth0Interface) - if err != nil { - return err - } - - // Fetch the OVF env to get the Admiral port - ovf, err := lib.UnmarshaledOvfEnv() - if err != nil { - return err - } - admiralPort := ovf.Properties["management_portal.management_portal_port"] - - // Out of the box users - defCreateUsers, foundCreateUsers := ovf.Properties["default_users.create_def_users"] - defPrefix, foundPrefix := ovf.Properties["default_users.def_user_prefix"] - defPassword, foundPassword := ovf.Properties["default_users.def_user_password"] - - log.Infof("PSC Out of the box users. 
CreateUsers: %s, FoundCreateUsers: %v, Prefix: %s", - defCreateUsers, foundCreateUsers, defPrefix) - - // Register all VIC components with PSC - cmdName := "/usr/bin/java" - for _, client := range []string{"harbor", "engine", "admiral"} { - - cmdArgs := []string{ - "-jar", - pscBinaryPath, - "--command=register", - "--clientName=" + client, - // NOTE(anchal): version set to 6.0 to use SAML for both versions 6.0 and 6.5 - "--version=6.0", - "--tenant=" + domain, - "--domainController=" + pscInstance, - "--username=" + admin.User, - "--password=" + admin.Password, - "--admiralUrl=" + fmt.Sprintf("https://%s:%s", getHostname(ovf, vmIP), admiralPort), - "--configDir=" + pscConfDir, - } - - if client == "admiral" && foundCreateUsers && strings.ToLower(defCreateUsers) == "true" { - if foundPrefix && defPrefix != "" { - arg := "--defaultUserPrefix=" + defPrefix - cmdArgs = append(cmdArgs, arg) - } - - if foundPassword && defPrefix != "" && defPassword != "" { - arg := "--defaultUserPassword=" + defPassword - cmdArgs = append(cmdArgs, arg) - } - } - - // #nosec: Subprocess launching with variable. - // This runs the PSC tool's register command. - cmd := exec.Command(cmdName, cmdArgs...) - if output, err := cmd.CombinedOutput(); err != nil { - log.Infof("Error running PSC register command for %s: %s", client, string(output)) - return err - } - log.Infof("Successfully registered %s with PSC", client) - } - - return nil -} - -func getHostname(ovf lib.Environment, vmIP net.IP) string { - - // Until we gix transient hostnames, use the static hostname reported by hostnamectl. - // os.Hostname() returns the kernel hostname, with no regard to transient or static classifications. - // fqdn, err := os.Hostname() - // var url string - // if err == nil && fqdn != "" { - // return fqdn - // } else { - // return vmIP.String() - // } - - command := "hostnamectl status --static" - // #nosec: Subprocess launching with variable. - out, err := exec.Command("/bin/bash", "-c", command).Output() - if err != nil { - log.Errorf(err.Error()) - return vmIP.String() - } - outString := strings.TrimSpace(string(out)) - if outString == "" { - return vmIP.String() - } - return outString -} diff --git a/installer/fileserver/tasks/ova/configure.go b/installer/fileserver/tasks/ova/configure.go new file mode 100644 index 0000000000..fc98900d70 --- /dev/null +++ b/installer/fileserver/tasks/ova/configure.go @@ -0,0 +1,135 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
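The new `tasks/ova/configure.go` below marks the appliance VM as managed by the VIC extension. The heart of that operation is a single `Reconfigure` call that sets `ManagedByInfo`; a minimal sketch in plain govmomi terms, assuming the `*object.VirtualMachine` has already been located:

```go
// Sketch of the ManagedByInfo reconfigure performed by ova.ConfigureManagedByInfo
// below; how the VirtualMachine reference is obtained is elided here.
package ovasketch

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/vim25/types"
)

func setManagedBy(ctx context.Context, vm *object.VirtualMachine) error {
	spec := types.VirtualMachineConfigSpec{
		ManagedBy: &types.ManagedByInfo{
			ExtensionKey: "com.vmware.vic", // ova.ManagedByKey
			Type:         "VicApplianceVM", // ova.ManagedByType
		},
	}

	task, err := vm.Reconfigure(ctx, spec)
	if err != nil {
		return err
	}
	// The production code uses vm.WaitForResult and inspects the TaskInfo instead.
	return task.Wait(ctx)
}
```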
+ +package ova + +import ( + "context" + "net" + "net/url" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/vic/pkg/errors" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/session" + "github.com/vmware/vic/pkg/vsphere/tasks" + "github.com/vmware/vic/pkg/vsphere/vm" +) + +const ( + // ManagedByKey defines the extension key to use in the ManagedByInfo of the OVA + ManagedByKey = "com.vmware.vic" + // ManagedByType defines the type to use in the ManagedByInfo of the OVA + ManagedByType = "VicApplianceVM" +) + +// ConfigureManagedByInfo sets the ManagedBy field for the VM specified by ovaURL +func ConfigureManagedByInfo(op trace.Operation, sess *session.Session, ovaURL string) error { + op.Infof("Attempting to create the appliance vm ref") + v, err := getOvaVM(op, sess, ovaURL) + if err != nil { + return err + } + + op.Infof("Attempting to configure ManagedByInfo") + err = configureManagedByInfo(op, sess, v) + if err != nil { + return err + } + + op.Infof("Successfully configured ManagedByInfo") + return nil +} + +func configureManagedByInfo(op trace.Operation, sess *session.Session, v *vm.VirtualMachine) error { + spec := types.VirtualMachineConfigSpec{ + ManagedBy: &types.ManagedByInfo{ + ExtensionKey: ManagedByKey, + Type: ManagedByType, + }, + } + + info, err := v.WaitForResult(op, func(ctx context.Context) (tasks.Task, error) { + return v.Reconfigure(ctx, spec) + }) + + if err != nil { + op.Errorf("Error while setting ManagedByInfo: %s", err) + return err + } + + if info.State != types.TaskInfoStateSuccess { + op.Errorf("Setting ManagedByInfo reported: %s", info.Error.LocalizedMessage) + return err + } + + return nil +} + +func getOvaVM(op trace.Operation, sess *session.Session, u string) (*vm.VirtualMachine, error) { + ovaURL, err := url.Parse(u) + if err != nil { + return nil, err + } + + host := ovaURL.Hostname() + + op.Debugf("Looking up host %s", host) + ips, err := net.LookupIP(host) + if err != nil { + return nil, errors.Errorf("IP lookup failed: %s", err) + } + + op.Debugf("found %d IP(s) from hostname lookup on %s:", len(ips), host) + var ip string + for _, i := range ips { + op.Debugf(i.String()) + if i.To4() != nil { + ip = i.String() + } + } + + if ip == "" { + return nil, errors.Errorf("IPV6 support not yet implemented") + } + + // Create a vm reference using the appliance ip + ref, err := object.NewSearchIndex(sess.Vim25()).FindByIp(op, nil, ip, true) + if err != nil { + return nil, errors.Errorf("failed to search for vms: %s", err.Error()) + } + + v, ok := ref.(*object.VirtualMachine) + if !ok { + return nil, errors.Errorf("failed to find vm with ip: %s", ip) + } + + op.Debugf("Checking IP for %s", v.Reference().Value) + vmIP, err := v.WaitForIP(op) + if err != nil { + return nil, errors.Errorf("Cannot get appliance vm ip: %s", err.Error()) + } + + // verify the tagged vm has the IP we expect + if vmIP != ip { + return nil, errors.Errorf("vm ip %s does not match guest.ip %s", vmIP, ip) + } + + op.Debugf("Found OVA with matching IP: %s", ip) + return &vm.VirtualMachine{ + VirtualMachine: v, + Session: sess, + }, nil +} diff --git a/installer/fileserver/tasks/ova/configure_test.go b/installer/fileserver/tasks/ova/configure_test.go new file mode 100644 index 0000000000..cd5a56f0ce --- /dev/null +++ b/installer/fileserver/tasks/ova/configure_test.go @@ -0,0 +1,105 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ova + +import ( + "context" + "crypto/tls" + "fmt" + "net/url" + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware/govmomi/object" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/session" +) + +func TestGetOvaVMWithBadURL(t *testing.T) { + bogusURL := "foo/bar.url://what-is-this" + op := trace.NewOperation(context.Background(), "TestGetOvaVMWithBadURL") + vm, err := getOvaVM(op, nil, bogusURL) + assert.Nil(t, vm) + assert.Error(t, err) +} + +func TestGetOvaVM(t *testing.T) { + username := os.Getenv("TEST_VC_USERNAME") + password := os.Getenv("TEST_VC_PASSWORD") + vcURL := os.Getenv("TEST_VC_URL") + ovaURL := os.Getenv("TEST_OVA_URL") + op := trace.NewOperation(context.Background(), "TestGetOvaVM") + + if vcURL == "" || ovaURL == "" { + op.Infof("Skipping TestGetOvaVM") + t.Skipf("This test should only run against a VC with a deployed OVA") + } + + vc, err := url.Parse(vcURL) + if err != nil { + fmt.Printf("Failed to parse VC url: %s", err) + t.FailNow() + } + + vc.User = url.UserPassword(username, password) + + var cert object.HostCertificateInfo + if err = cert.FromURL(vc, new(tls.Config)); err != nil { + op.Error(err) + t.FailNow() + } + + if cert.Err != nil { + op.Errorf("Failed to verify certificate for target=%s (thumbprint=%s)", vc.Host, cert.ThumbprintSHA1) + op.Error(cert.Err.Error()) + } + + tp := cert.ThumbprintSHA1 + op.Infof("Accepting host %q thumbprint %s", vc.Host, tp) + + sessionConfig := &session.Config{ + Thumbprint: tp, + Service: vc.String(), + DatacenterPath: "/ha-datacenter", + DatastorePath: "datastore1", + User: vc.User, + Insecure: true, + } + + s := session.NewSession(sessionConfig) + sess, err := s.Connect(op) + if err != nil { + op.Errorf("Error connecting: %s", err.Error()) + } + defer sess.Logout(op) + + sess, err = sess.Populate(op) + if err != nil { + op.Errorf("Error populating: %s", err.Error()) + } + + vm, err := getOvaVM(op, sess, ovaURL) + if err != nil { + op.Errorf("Error getting OVA: %s", err.Error()) + } + if vm == nil { + op.Errorf("No VM found") + t.FailNow() + } + + op.Infof("%s", vm.String()) +} diff --git a/installer/fileserver/tasks/plugin.go b/installer/fileserver/tasks/plugin.go new file mode 100644 index 0000000000..0811561f31 --- /dev/null +++ b/installer/fileserver/tasks/plugin.go @@ -0,0 +1,447 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "context" + "crypto/tls" + "fmt" + "net/url" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/vmware/govmomi/object" + "github.com/vmware/vic-product/installer/fileserver/tasks/ova" + "github.com/vmware/vic-product/installer/fileserver/tasks/plugin" + "github.com/vmware/vic-product/installer/lib" + "github.com/vmware/vic-product/installer/pkg/ip" + "github.com/vmware/vic/pkg/errors" + "github.com/vmware/vic/pkg/trace" +) + +const ( + h5ClientPluginName = "vSphere Integrated Containers-H5Client" + h5ClientPluginSummary = "Plugin for vSphere Integrated Containers-H5Client" + h5ClientPluginKey = "com.vmware.vic" + flexClientPluginName = "vSphere Integrated Containers-FlexClient" + flexClientPluginSummary = "Plugin for vSphere Integrated Containers-FlexClient" + flexClientPluginKey = "com.vmware.vic.ui" + pluginCompany = "VMware" + pluginEntityType = "VicApplianceVM" + fileserverPluginsPath = "/opt/vmware/fileserver/files/" +) + +var ( + pluginVersion string +) + +func init() { + op := trace.NewOperation(context.Background(), "Init") + // Match the com.vmware.vic-vX.X.X.X.zip file + re := regexp.MustCompile(`com\.vmware\.vic-v(\d+\.\d+\.\d+\.\d+)\.zip`) + filepath.Walk(fileserverPluginsPath, func(path string, f os.FileInfo, err error) error { + // First match from FindStringSubmatch is always the full match + if f == nil || f.IsDir() { + return nil + } + match := re.FindStringSubmatch(f.Name()) + if len(match) > 1 { + pluginVersion = match[1] + op.Debugf("found plugin '%s' with version '%s'", f.Name(), match[1]) + return fmt.Errorf("stop") // returning an error stops the file walk + } + return nil + }) +} + +// Plugin has all input parameters for vic-ui ui command +type Plugin struct { + Target *lib.LoginInfo + + Force bool + Insecure bool + + Company string + HideInSolutionManager bool + Configure bool + Key string + Name string + Summary string + Version string + EntityType string + + ApplianceHost string + ApplianceURL string + ApplianceServerThumbprint string +} + +// NewUIPlugin Returns a UI Plugin struct with the given target +func NewUIPlugin(target *lib.LoginInfo) *Plugin { + if target == nil { + return &Plugin{Target: &lib.LoginInfo{}} + } + return &Plugin{Target: target} +} + +// NewH5UIPlugin Returns a UI Plugin struct populated defaults for an H5 Client install +func NewH5UIPlugin(target *lib.LoginInfo) *Plugin { + p := NewUIPlugin(target) + p.Version = pluginVersion + p.EntityType = pluginEntityType + p.Company = pluginCompany + p.Key = h5ClientPluginKey + p.Name = h5ClientPluginName + p.Summary = h5ClientPluginSummary + p.Configure = true + p.Insecure = true + + return p +} + +// NewFlexUIPlugin Returns a UI Plugin struct populated defaults for an Flex Client install +func NewFlexUIPlugin(target *lib.LoginInfo) *Plugin { + p := NewUIPlugin(target) + p.Version = pluginVersion + p.EntityType = pluginEntityType + p.Company = pluginCompany + p.Key = flexClientPluginKey + p.Name = flexClientPluginName + p.Summary = flexClientPluginSummary + p.Configure = true + p.Insecure = true + + return p +} + +func (p *Plugin) Install(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + var err error + if err = p.processInstallParams(op); err != nil { + op.Error(err) + return err + } + vCenterVersion := p.Target.Session.Client.ServiceContent.About.Version + if p.denyInstall(op, vCenterVersion) { + 
op.Warnf("Refusing to install Flex plugin on vSphere %s", vCenterVersion) + return nil + } + + op.Infof("### Installing UI Plugin against vSphere %s ####", vCenterVersion) + op.Infof("%+v", p.Target.URL) + pInfo := &plugin.Info{ + Company: p.Company, + Key: p.Key, + Name: p.Name, + ServerThumbprint: p.ApplianceServerThumbprint, + ShowInSolutionManager: !p.HideInSolutionManager, + Summary: p.Summary, + Type: "vsphere-client-serenity", + URL: p.ApplianceURL, + Version: p.Version, + } + + if p.EntityType != "" { + pInfo.ManagedEntityInfo = &plugin.ManagedEntityInfo{ + Description: p.Summary, + EntityType: p.EntityType, + } + } + + pl, err := plugin.NewPluginator(op, p.Target.Session, pInfo) + if err != nil { + op.Error(err) + return err + } + + reg, err := pl.IsRegistered(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + if reg { + if p.Force { + op.Info("Removing existing plugin to force install") + err = pl.Unregister(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + op.Info("Removed existing plugin") + } else { + msg := fmt.Sprintf("plugin (%s) is already registered", pInfo.Key) + op.Errorf("Install failed: %s", msg) + return errors.New(msg) + } + } + + op.Info("Installing plugin") + err = pl.Register() + if err != nil { + op.Error(err) + return err + } + + reg, err = pl.IsRegistered(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + if !reg { + msg := fmt.Sprintf("post-install check failed to find %s registered", pInfo.Key) + op.Errorf("Install failed: %s", msg) + return errors.New(msg) + } + + op.Info("Installed UI plugin") + + if p.Configure { + // Configure the OVA vm to be managed by this plugin + if err = ova.ConfigureManagedByInfo(op, p.Target.Session, pInfo.URL); err != nil { + op.Error(err) + return err + } + } + + return nil +} + +func (p *Plugin) Remove(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + var err error + if err = p.processRemoveParams(op); err != nil { + op.Error(err) + return err + } + + if p.Force { + op.Info("Ignoring --force") + } + + op.Infof("### Removing UI Plugin ####") + + pInfo := &plugin.Info{ + Key: p.Key, + } + + pl, err := plugin.NewPluginator(op, p.Target.Session, pInfo) + if err != nil { + op.Error(err) + return err + } + reg, err := pl.IsRegistered(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + if reg { + op.Infof("Found target plugin: %s", pInfo.Key) + } else { + msg := fmt.Sprintf("failed to find target plugin (%s)", pInfo.Key) + op.Errorf("Remove failed: %s", msg) + return errors.New(msg) + } + + op.Info("Removing plugin") + err = pl.Unregister(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + + reg, err = pl.IsRegistered(pInfo.Key) + if err != nil { + op.Error(err) + return err + } + if reg { + msg := fmt.Sprintf("post-remove check found %s still registered", pInfo.Key) + op.Errorf("Remove failed: %s", msg) + return errors.New(msg) + } + + op.Info("Removed UI plugin") + return nil +} + +func (p *Plugin) Info(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + var err error + if err = p.processInfoParams(op); err != nil { + op.Error(err) + return err + } + + pInfo := &plugin.Info{ + Key: p.Key, + } + + pl, err := plugin.NewPluginator(op, p.Target.Session, pInfo) + if err != nil { + op.Error(err) + return err + } + + reg, err := pl.GetPlugin(p.Key) + if err != nil { + op.Error(err) + return err + } + if reg == nil { + return errors.Errorf("%s is not registered", p.Key) + } + + op.Infof("%s is registered", p.Key) + op.Info("") + 
op.Infof("Key: %s", reg.Key) + op.Infof("Name: %s", reg.Description.GetDescription().Label) + op.Infof("Summary: %s", reg.Description.GetDescription().Summary) + op.Infof("Company: %s", reg.Company) + op.Infof("Version: %s", reg.Version) + return nil +} + +func (p *Plugin) processInstallParams(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + if p.Target.Session == nil { + cancel, err := p.Target.VerifyLogin(op) + defer cancel() + + if err != nil { + op.Error(err) + return err + } + } + + if p.Company == "" { + return errors.New("company must be specified") + } + + if p.Key == "" { + return errors.New("key must be specified") + } + + if p.Name == "" { + return errors.New("name must be specified") + } + + if p.Summary == "" { + return errors.New("summary must be specified") + } + + if p.Version == "" { + return errors.New("version must be specified") + } + + if p.ApplianceHost == "" { + // Obtain the OVA VM's IP + vmIP, err := ip.FirstIPv4(ip.Eth0Interface) + if err != nil { + op.Error(err) + return errors.Errorf("Cannot generate appliance ip: %s", errors.ErrorStack(err)) + } + // Fetch the OVF env to get the fileserver port + ovf, err := lib.UnmarshaledOvfEnv() + if err != nil { + op.Error(err) + return errors.Errorf("Cannot get appliance ovfenv: %s", errors.ErrorStack(err)) + } + p.ApplianceHost = fmt.Sprintf("%s:%s", GetHostname(ovf, vmIP), ovf.Properties["appliance.config_port"]) + op.Debugf("appliance host not specified. generated host: %s", p.ApplianceHost) + + } + if p.ApplianceURL == "" { + p.ApplianceURL = fmt.Sprintf("https://%s/files/%s-v%s.zip", p.ApplianceHost, p.Key, p.Version) + op.Debugf("https plugin url not specified. generated plugin url: %s", p.ApplianceURL) + } + if p.ApplianceServerThumbprint == "" { + var cert object.HostCertificateInfo + if err := cert.FromURL(&url.URL{Host: p.ApplianceHost}, &tls.Config{}); err != nil { + op.Error(err) + return errors.Errorf("Error getting thumbprint for %s: %s", p.ApplianceHost, errors.ErrorStack(err)) + } + p.ApplianceServerThumbprint = cert.ThumbprintSHA1 + op.Debugf("server-thumbprint not specified with HTTPS plugin URL. 
generated thumbprint: %s", p.ApplianceServerThumbprint) + } + + return nil +} + +func (p *Plugin) processRemoveParams(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + if p.Target.Session == nil { + cancel, err := p.Target.VerifyLogin(op) + defer cancel() + + if err != nil { + op.Error(err) + return err + } + } + + if p.Key == "" { + return errors.New("key must be specified") + } + + return nil +} + +func (p *Plugin) processInfoParams(op trace.Operation) error { + defer trace.End(trace.Begin("", op)) + + if p.Target.Session == nil { + cancel, err := p.Target.VerifyLogin(op) + defer cancel() + + if err != nil { + op.Error(err) + return err + } + } + + if p.Key == "" { + return errors.New("key must be specified") + } + return nil +} + +func (p *Plugin) denyInstall(op trace.Operation, version string) bool { + vCenterVersion := strings.Split(version, ".") + + if len(vCenterVersion) < 2 { + op.Debugf("Cannot filter vSphere version (%s) because it is not a semantic version", strings.Join(vCenterVersion, ".")) + return false + } + semver := map[string]string{ + "major": vCenterVersion[0], + "minor": vCenterVersion[1], + } + // Deny install if: + // Plugin is the flex plugin AND + // -- major version us 6 AND + // -- -- minor version is greater than Or equal to 7 OR + // -- major version is greater than or equal to 7 + return p.Key == flexClientPluginKey && + ((semver["major"] == "6" && semver["minor"] == "7") || + (semver["major"] == "6" && strings.Compare(semver["minor"], "7") == 1) || + (strings.Compare(semver["major"], "6") == 1)) + +} diff --git a/installer/fileserver/tasks/plugin/register.go b/installer/fileserver/tasks/plugin/register.go new file mode 100644 index 0000000000..176625fe82 --- /dev/null +++ b/installer/fileserver/tasks/plugin/register.go @@ -0,0 +1,197 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
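Taken together, the constructors and `Install` above give a small surface for registering the H5 client plugin. A hedged usage sketch; the credentials and thumbprint are placeholders and error handling is abbreviated:

```go
// Hypothetical driver for tasks.NewH5UIPlugin / Plugin.Install shown above;
// the target details are placeholders, not values from this change.
package main

import (
	"context"

	"github.com/vmware/vic-product/installer/fileserver/tasks"
	"github.com/vmware/vic-product/installer/lib"
	"github.com/vmware/vic/pkg/trace"
)

func main() {
	op := trace.NewOperation(context.Background(), "install-h5-plugin")

	target := &lib.LoginInfo{
		Target:     "vcenter.example.com",
		User:       "administrator@vsphere.local",
		Password:   "password-placeholder",
		Thumbprint: "host-sha1-thumbprint-placeholder", // required before VerifyLogin succeeds
	}

	p := tasks.NewH5UIPlugin(target) // defaults: com.vmware.vic key, Configure and Insecure set
	p.Force = true                   // replace an existing registration instead of failing

	if err := p.Install(op); err != nil {
		op.Errorf("plugin install failed: %s", err)
	}
}
```

The Flex client path is identical apart from `NewFlexUIPlugin`, which `Install` additionally gates on the vCenter version via `denyInstall`.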
+ +package plugin + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/types" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/session" +) + +type Info struct { + *ManagedEntityInfo + + Company string + Key string + Name string + ServerThumbprint string + ShowInSolutionManager bool + Summary string + Type string + URL string + Version string +} + +type ManagedEntityInfo struct { + Description string + IconURL string + SmallIconURL string + EntityType string +} + +type Pluginator struct { + Session *session.Session + ExtensionManager *object.ExtensionManager + Context context.Context + + info *Info + op trace.Operation +} + +func NewPluginator(op trace.Operation, sess *session.Session, i *Info) (*Pluginator, error) { + defer trace.End(trace.Begin("")) + + p := &Pluginator{ + Session: sess, + info: i, + op: op, + } + p.Context = op + + err := p.connect() + if err != nil { + return nil, err + } + + return p, nil +} + +func (p *Pluginator) connect() error { + defer trace.End(trace.Begin("")) + + em, err := object.GetExtensionManager(p.Session.Client.Client) + if err != nil { + return fmt.Errorf("failed to get extension manager: %s", err) + } + p.ExtensionManager = em + + return nil +} + +// Register installs an extension to the target +func (p *Pluginator) Register() error { + defer trace.End(trace.Begin("")) + var err error + + desc := types.Description{ + Label: p.info.Name, + Summary: p.info.Summary, + } + + e := types.Extension{ + Key: p.info.Key, + Version: p.info.Version, + Company: p.info.Company, + Description: &desc, + } + + if p.info.ManagedEntityInfo != nil { + e.Type = p.info.EntityType + } + + eci := types.ExtensionClientInfo{ + Version: p.info.Version, + Company: p.info.Company, + Description: &desc, + Type: p.info.Type, + Url: p.info.URL, + } + e.Client = append(e.Client, eci) + + d := types.KeyValue{ + Key: "name", + Value: p.info.Name, + } + + eri := types.ExtensionResourceInfo{ + Locale: "en_US", + Module: "name", + } + + if p.info.ManagedEntityInfo != nil { + mei := types.ExtManagedEntityInfo{ + Description: p.info.ManagedEntityInfo.Description, + Type: p.info.ManagedEntityInfo.EntityType, + } + e.ManagedEntityInfo = append(e.ManagedEntityInfo, mei) + } + + eri.Data = append(eri.Data, d) + + e.ResourceList = append(e.ResourceList, eri) + + // HTTPS requires extension server info + if strings.HasPrefix(strings.ToLower(p.info.URL), "https://") { + esi := types.ExtensionServerInfo{ + Url: p.info.URL, + Description: &desc, + Company: p.info.Company, + Type: "HTTPS", + AdminEmail: []string{"noreply@vmware.com"}, + ServerThumbprint: p.info.ServerThumbprint, + } + e.Server = append(e.Server, esi) + } + + e.ShownInSolutionManager = &p.info.ShowInSolutionManager + + e.LastHeartbeatTime = time.Now().UTC() + + err = p.ExtensionManager.Register(p.Context, e) + if err != nil { + return err + } + + return nil +} + +// Unregister removes an extension from the target +func (p *Pluginator) Unregister(key string) error { + defer trace.End(trace.Begin("")) + + if err := p.ExtensionManager.Unregister(p.Context, key); err != nil { + return err + } + return nil +} + +// IsRegistered checks for presence of an extension on the target +func (p *Pluginator) IsRegistered(key string) (bool, error) { + defer trace.End(trace.Begin("")) + + e, err := p.ExtensionManager.Find(p.Context, key) + if err != nil { + return false, err + } + if e != nil { + p.op.Debugf("%q is registered", key) + return 
true, nil + } + p.op.Debugf("%q is not registered", key) + return false, nil +} + +// IsRegistered checks for presence of an extension on the target +func (p *Pluginator) GetPlugin(key string) (*types.Extension, error) { + defer trace.End(trace.Begin("")) + + return p.ExtensionManager.Find(p.Context, key) +} diff --git a/installer/fileserver/tasks/plugin_test.go b/installer/fileserver/tasks/plugin_test.go new file mode 100644 index 0000000000..e4423fbec9 --- /dev/null +++ b/installer/fileserver/tasks/plugin_test.go @@ -0,0 +1,72 @@ +// Copyright 2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tasks + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/vmware/vic/pkg/trace" +) + +func TestFlexInstallWithBadVersions(t *testing.T) { + op := trace.NewOperation(context.Background(), "TestFlexInstallWithBadVersions") + tests := map[string]bool{ + "6.0": false, + "6.0.0": false, + "6.0.1": false, + "6.5": false, + "6.5.0": false, + "6.5.1": false, + "6.7": true, + "6.7.0": true, + "6.7.1": true, + "7.0": true, + "7.0.0": true, + "7.0.1": true, + } + + p := NewUIPlugin(nil) + p.Key = flexClientPluginKey + for k, v := range tests { + assert.Equal(t, p.denyInstall(op, k), v, "Plugin version %s", k) + } +} + +func TestH5InstallWithAnyVersion(t *testing.T) { + op := trace.NewOperation(context.Background(), "TestH5InstallWithAnyVersion") + tests := map[string]bool{ + "6.0": false, + "6.0.0": false, + "6.0.1": false, + "6.5": false, + "6.5.0": false, + "6.5.1": false, + "6.7": false, + "6.7.0": false, + "6.7.1": false, + "7.0": false, + "7.0.0": false, + "7.0.1": false, + } + + p := NewUIPlugin(nil) + p.Key = h5ClientPluginKey + for k, v := range tests { + assert.Equal(t, p.denyInstall(op, k), v, "Plugin version %s", k) + } +} diff --git a/installer/fileserver/tasks/register.go b/installer/fileserver/tasks/register.go new file mode 100644 index 0000000000..028971fa21 --- /dev/null +++ b/installer/fileserver/tasks/register.go @@ -0,0 +1,198 @@ +// Copyright 2017-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
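The `tasks/register.go` file that follows wraps PSC registration behind `PSCRegistrationConfig`. A hedged sketch of how a caller might drive it, mirroring the login pattern used by `Plugin` above; all credential values are placeholders and the flow assumes it runs on the appliance, since the OVF environment and eth0 IP are read internally:

```go
// Hypothetical driver for tasks.NewPSCRegistrationConfig / RegisterAppliance
// defined below; credentials and thumbprint are placeholders.
package main

import (
	"context"

	"github.com/vmware/vic-product/installer/fileserver/tasks"
	"github.com/vmware/vic/pkg/trace"
)

func main() {
	op := trace.NewOperation(context.Background(), "register-appliance")

	conf := tasks.NewPSCRegistrationConfig()
	conf.Admin.Target = "vcenter.example.com"
	conf.Admin.User = "administrator@vsphere.local"
	conf.Admin.Password = "password-placeholder"
	conf.Admin.Thumbprint = "host-sha1-thumbprint-placeholder"

	// Leaving PscInstance and PscDomain empty lets RegisterWithPSC derive them
	// from vCenter's hostname option and the admin user's domain.

	cancel, err := conf.Admin.VerifyLogin(op)
	defer cancel()
	if err != nil {
		op.Errorf("login failed: %s", err)
		return
	}

	if err := conf.RegisterAppliance(op); err != nil {
		op.Errorf("appliance registration failed: %s", err)
	}
}
```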
+ +package tasks + +import ( + "fmt" + "io/ioutil" + "net" + "os/exec" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + + "github.com/vmware/vic-product/installer/fileserver/tasks/tagvm" + "github.com/vmware/vic-product/installer/lib" + "github.com/vmware/vic-product/installer/pkg/ip" + "github.com/vmware/vic/pkg/errors" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/optmanager" +) + +const ( + // InitServicesTimestamp exists on the local fs when registration first succeeds + InitServicesTimestamp = "./registration-timestamps.txt" + pscBinaryPath = "/etc/vmware/admiral/admiral-auth-psc-1.3.2-SNAPSHOT-command.jar" + vcHostnameOption = "config.vpxd.hostnameUrl" + pscConfDir = "/etc/vmware/psc" + pscConfFileName = "psc-config.properties" +) + +// PSCRegistrationConfig holds the required data for a psc registration +type PSCRegistrationConfig struct { + Admin *lib.LoginInfo + PscInstance string + PscDomain string +} + +// NewPSCRegistrationConfig returns a PSCRegistrationConfig with a initialized Admin LoginInfo type +func NewPSCRegistrationConfig() *PSCRegistrationConfig { + return &PSCRegistrationConfig{ + Admin: &lib.LoginInfo{}, + } +} + +// RegisterAppliance runs the three processes required to register the appliance: +// TagVM, RegisterWithPSC, and SaveInitializationState +func (conf *PSCRegistrationConfig) RegisterAppliance(op trace.Operation) error { + if err := tagvm.Run(op, conf.Admin.Session); err != nil { + op.Debug(errors.ErrorStack(err)) + return errors.New("Failed to locate VIC Appliance. Please check the vCenter Server provided and try again") + } + + if err := conf.RegisterWithPSC(op); err != nil { + op.Debug(errors.ErrorStack(err)) + return errors.New("Failed to register with PSC. Please check the PSC settings provided and try again") + } + + if err := ioutil.WriteFile(InitServicesTimestamp, []byte(time.Now().String()), 0644); err != nil { + op.Debug(errors.ErrorStack(err)) + return errors.New("Failed to write to timestamp file") + } + + return nil +} + +// RegisterWithPSC runs the PSC register command to register VIC services with +// the platforms services controller. The command generates config files and +// keystore files to use while getting and renewing tokens. 
+func (conf *PSCRegistrationConfig) RegisterWithPSC(op trace.Operation) error { + var err error + + // Use vSphere as the psc instance if external psc was not supplied + if conf.PscInstance == "" { + // Obtain the hostname of the vCenter host to use as PSC instance + conf.PscInstance, err = optmanager.QueryOptionValue(op.Context, conf.Admin.Session, vcHostnameOption) + if err != nil { + return err + } + } + + // Use vSphere or user's domain as the psc domain if external psc was not supplied + if conf.PscDomain == "" { + // Obtain the Admin user's domain + conf.PscDomain = "vsphere.local" + userFields := strings.SplitN(conf.Admin.User, "@", 2) + if len(userFields) == 2 { + conf.PscDomain = userFields[1] + } + } + + op.Infof("vCenter user: %s", conf.Admin.User) + op.Infof("PSC instance: %s", conf.PscInstance) + op.Infof("PSC domain: %s", conf.PscDomain) + + // Obtain the OVA VM's IP + vmIP, err := ip.FirstIPv4(ip.Eth0Interface) + if err != nil { + return err + } + + // Fetch the OVF env to get the Admiral port + ovf, err := lib.UnmarshaledOvfEnv() + if err != nil { + return err + } + admiralPort := ovf.Properties["management_portal.management_portal_port"] + + // Out of the box users + defCreateUsers, foundCreateUsers := ovf.Properties["default_users.create_def_users"] + defPrefix, foundPrefix := ovf.Properties["default_users.def_user_prefix"] + defPassword, foundPassword := ovf.Properties["default_users.def_user_password"] + + op.Infof("PSC Out of the box users. CreateUsers: %s, FoundCreateUsers: %v, Prefix: %s", + defCreateUsers, foundCreateUsers, defPrefix) + + // Register all VIC components with PSC + cmdName := "/usr/bin/java" + for _, client := range []string{"harbor", "engine", "admiral"} { + + cmdArgs := []string{ + "-jar", + pscBinaryPath, + "--command=register", + "--clientName=" + client, + // NOTE(anchal): version set to 6.0 to use SAML for both versions 6.0 and 6.5 + "--version=6.0", + "--tenant=" + conf.PscDomain, + "--domainController=" + conf.PscInstance, + "--username=" + conf.Admin.User, + "--password=" + conf.Admin.Password, + "--admiralUrl=" + fmt.Sprintf("https://%s:%s", GetHostname(ovf, vmIP), admiralPort), + "--configDir=" + pscConfDir, + } + + if client == "admiral" && foundCreateUsers && strings.ToLower(defCreateUsers) == "true" { + if foundPrefix && defPrefix != "" { + arg := "--defaultUserPrefix=" + defPrefix + cmdArgs = append(cmdArgs, arg) + } + + if foundPassword && defPrefix != "" && defPassword != "" { + arg := "--defaultUserPassword=" + defPassword + cmdArgs = append(cmdArgs, arg) + } + } + + // #nosec: Subprocess launching with variable. + // This runs the PSC tool's register command. + cmd := exec.Command(cmdName, cmdArgs...) + if output, err := cmd.CombinedOutput(); err != nil { + op.Infof("Error running PSC register command for %s: %s", client, string(output)) + return err + } + op.Infof("Successfully registered %s with PSC", client) + } + + return nil +} + +// GetHostname returns the non-transient hostname of the Appliance +func GetHostname(ovf lib.Environment, vmIP net.IP) string { + + // Until we gix transient hostnames, use the static hostname reported by hostnamectl. + // os.Hostname() returns the kernel hostname, with no regard to transient or static classifications. + // fqdn, err := os.Hostname() + // var url string + // if err == nil && fqdn != "" { + // return fqdn + // } else { + // return vmIP.String() + // } + + command := "hostnamectl status --static" + // #nosec: Subprocess launching with variable. 
+ out, err := exec.Command("/bin/bash", "-c", command).Output() + if err != nil { + log.Errorf(err.Error()) + return vmIP.String() + } + outString := strings.TrimSpace(string(out)) + if outString == "" { + return vmIP.String() + } + return outString +} diff --git a/installer/fileserver/tasks/tagvm/tagvm.go b/installer/fileserver/tasks/tagvm/tagvm.go new file mode 100644 index 0000000000..5fd2d2e935 --- /dev/null +++ b/installer/fileserver/tasks/tagvm/tagvm.go @@ -0,0 +1,100 @@ +// Copyright 2016-2018 VMware, Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tagvm + +import ( + "net/url" + + "github.com/vmware/govmomi/object" + "github.com/vmware/vic/lib/guest" + "github.com/vmware/vic/pkg/errors" + "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/session" + "github.com/vmware/vic/pkg/vsphere/tags" +) + +const ( + VicProductCategory = "VsphereIntegratedContainers" + VicProductDescription = "VIC product" + VicProductType = "VirtualMachine" + ProductVMTag = "ProductVM" + ProductVMDescription = "Product VM" +) + +func setupClient(op trace.Operation, sess *session.Session) (*tags.RestClient, error) { + endpoint, err := url.Parse(sess.Service) + client := tags.NewClient(endpoint, sess.Insecure, sess.Thumbprint) + err = client.Login(op) + if err != nil { + op.Errorf("failed to connect rest API for %s", errors.ErrorStack(err)) + return client, errors.Errorf("Rest is not accessible") + } + + return client, nil +} + +func createProductVMtag(op trace.Operation, client *tags.RestClient) (string, error) { + // create category first, then create tag + categoryID, err := client.CreateCategoryIfNotExist(op, VicProductCategory, VicProductDescription, VicProductType, false) + if err != nil { + return "", errors.Errorf("failed to create vic product category: %s", errors.ErrorStack(err)) + } + + tagID, err := client.CreateTagIfNotExist(op, ProductVMTag, ProductVMDescription, *categoryID) + if err != nil { + return "", errors.Errorf("failed to create product vm tag: %s", errors.ErrorStack(err)) + } + + return *tagID, nil +} + +func attachTag(op trace.Operation, client *tags.RestClient, sess *session.Session, tagID string, vm *object.VirtualMachine) error { + if tagID == "" || sess == nil { + return errors.Errorf("failed to attach product vm tag") + } + + err := client.AttachTagToObject(op, tagID, vm.Reference().Value, vm.Reference().Type) + if err != nil { + return errors.Errorf("failed to apply the tag on product vm : %s", errors.ErrorStack(err)) + } + + op.Debugf("successfully attached the product tag") + return nil +} + +// Run takes in a url and session and tag the ova vm. 
+func Run(op trace.Operation, sess *session.Session) error { + client, err := setupClient(op, sess) + if err != nil { + return err + } + + tagID, err := createProductVMtag(op, client) + if err != nil { + return err + } + + vm, err := guest.GetSelf(op, sess) + if err != nil { + return errors.Errorf("failed to get product vm : %s", errors.ErrorStack(err)) + } + + err = attachTag(op, client, sess, tagID, vm) + if err != nil { + return err + } + + return nil +} diff --git a/installer/lib/login.go b/installer/lib/login.go index de38ca8754..5029db2da5 100644 --- a/installer/lib/login.go +++ b/installer/lib/login.go @@ -1,4 +1,4 @@ -// Copyright 2017 VMware, Inc. All Rights Reserved. +// Copyright 2017-2018 VMware, Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -20,61 +20,74 @@ import ( "net/url" "time" - log "github.com/Sirupsen/logrus" - - "github.com/vmware/vic/lib/install/data" - "github.com/vmware/vic/lib/install/validate" + "github.com/vmware/vic-product/installer/pkg/version" + "github.com/vmware/vic/pkg/errors" "github.com/vmware/vic/pkg/trace" + "github.com/vmware/vic/pkg/vsphere/session" ) const loginTimeout = 15 * time.Second +// LoginInfo represents credentials needed to access vSphere type LoginInfo struct { - Target string `json:"target"` - User string `json:"user"` - Password string `json:"password"` - Validator *validate.Validator + Target string `json:"target"` + User string `json:"user"` + Password string `json:"password"` + Thumbprint string `json:"thumbprint"` + URL *url.URL + Session *session.Session } -// Verify login based on info given, return non nil error if validation fails. -func (info *LoginInfo) VerifyLogin() (context.CancelFunc, error) { +// VerifyLogin based on info given, return non nil error if validation fails. +func (info *LoginInfo) VerifyLogin(op trace.Operation) (context.CancelFunc, error) { defer trace.End(trace.Begin("")) - var u url.URL - u.User = url.UserPassword(info.User, info.Password) - u.Host = info.Target - u.Path = "" - log.Infof("server URL: %v\n", u.Host) - - input := data.NewData() - - username := u.User.Username() - input.OpsCredentials.OpsUser = &username - passwd, _ := u.User.Password() - input.OpsCredentials.OpsPassword = &passwd - input.URL = &u - input.Force = true + info.URL = &url.URL{ + Scheme: "https", + Host: info.Target, + User: url.UserPassword(info.User, info.Password), + Path: "", + } - input.User = username - input.Password = &passwd + op.Infof("server URL: %v\n", info.URL.Host) ctx, cancel := context.WithTimeout(context.Background(), loginTimeout) loginResponse := make(chan error, 1) - var v *validate.Validator var err error go func() { - v, err = validate.NewValidator(ctx, input) - info.Validator = v + if info.Thumbprint == "" { + err = errors.New("Thumbprint is empty") + op.Errorf("%s", err) + loginResponse <- err + return + } + + sessionconfig := &session.Config{ + Thumbprint: info.Thumbprint, + UserAgent: version.UserAgent("vic-appliance"), + Service: info.URL.String(), + } + + info.Session = session.NewSession(sessionconfig) + info.Session, err = info.Session.Connect(op) + if err != nil { + op.Errorf("failed to connect: %s", err) + loginResponse <- err + return + } + + // #nosec: Errors unhandled. 
+ info.Session.Populate(op) loginResponse <- err }() select { case <-ctx.Done(): - loginResponse <- fmt.Errorf("login failed; validator context exceeded") + loginResponse <- fmt.Errorf("login failed; session context deadline exceeded") case err := <-loginResponse: if err != nil { - log.Infof("validator: %s", err) + op.Infof("session: %s", err) loginResponse <- err } else { loginResponse <- nil diff --git a/installer/lib/tls.go b/installer/lib/tls.go index ea581f8216..29ab444a19 100644 --- a/installer/lib/tls.go +++ b/installer/lib/tls.go @@ -1,4 +1,4 @@ -// Copyright 2017 VMware, Inc. All Rights Reserved. +// Copyright 2017-2018 VMware, Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -16,32 +16,27 @@ package lib import ( "crypto/tls" - "net/http" ) -func GetTLSServer(addr string, handler http.Handler, cert tls.Certificate) *http.Server { +func GetTLSServerConfig(cert tls.Certificate) *tls.Config { // forcing tls 1.1, cipher from https://github.com/denji/golang-tls#perfect-ssl-labs-score-with-go // and https://wiki.mozilla.org/Security/TLS_Configurations#Go - return &http.Server{ - Addr: addr, - Handler: handler, - TLSConfig: &tls.Config{ - MinVersion: tls.VersionTLS11, - CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, - PreferServerCipherSuites: true, - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - }, - Certificates: []tls.Certificate{cert}, + return &tls.Config{ + MinVersion: tls.VersionTLS11, + CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, + PreferServerCipherSuites: true, + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_RSA_WITH_AES_128_CBC_SHA, }, + Certificates: []tls.Certificate{cert}, } } diff --git a/installer/scripts/ci-build.sh b/installer/scripts/ci-build.sh index 5943e0a787..0851c2d60d 100755 --- a/installer/scripts/ci-build.sh +++ b/installer/scripts/ci-build.sh @@ -32,6 +32,9 @@ fi if [ -n "${VICENGINE}" ]; then OPTIONS="$OPTIONS --vicengine $VICENGINE" fi +if [ -n "${VICUI}" ]; then + OPTIONS="$OPTIONS --vicui $VICUI" +fi if [ -n "${VIC_MACHINE_SERVER}" ]; then OPTIONS="$OPTIONS --vicmachineserver $VIC_MACHINE_SERVER" fi @@ -57,6 +60,15 @@ if [[ ( "$DRONE_BUILD_EVENT" == "tag" && "$DRONE_TAG" != *"dev"* ) || "$DRONE_BR vicengine_release=$(gsutil ls -l "$bucket" | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap '' PIPE; head -1) | xargs | cut -d ' ' -f 3 | sed 's/gs:\/\//https:\/\/storage.googleapis.com\//') OPTIONS="$OPTIONS --vicengine ${vicengine_release:?Unable to find an appropriate VIC Engine build. 
Is '"'$DRONE_BRANCH'"' a valid vmware/vic branch?}" fi + if [ -z "${VICUI}" ]; then + if [[ "$DRONE_BUILD_EVENT" != "tag" || "$DRONE_TAG" == *"dev"* ]]; then + bucket="gs://vic-ui-builds/$DRONE_BRANCH" + else + bucket="gs://vic-ui-releases" + fi + vicui_release=$(gsutil ls -l "$bucket" | grep -v TOTAL | grep vic_ | sort -k2 -r | (trap '' PIPE; head -1) | xargs | cut -d " " -f 3 | sed 's/gs:\/\//https:\/\/storage.googleapis.com\//') + OPTIONS="$OPTIONS --vicui ${vicui_release:?Unable to find an appropriate VIC UI build. Is '"'$DRONE_BRANCH'"' a valid vmware/vic-ui branch?}" + fi if [ -z "${VIC_MACHINE_SERVER}" ]; then # Listing container tags requires permissions if [ -z "$(gcloud auth list --filter=status:ACTIVE --format='value(account)')" ]; then diff --git a/installer/tagvm/tagvm.go b/installer/tagvm/tagvm.go deleted file mode 100644 index a19966185a..0000000000 --- a/installer/tagvm/tagvm.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2016-2017 VMware, Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tagvm - -import ( - "context" - "net/url" - - log "github.com/Sirupsen/logrus" - - "github.com/vmware/govmomi/object" - "github.com/vmware/vic/lib/guest" - "github.com/vmware/vic/pkg/errors" - "github.com/vmware/vic/pkg/vsphere/session" - "github.com/vmware/vic/pkg/vsphere/tags" -) - -const ( - VicProductCategory = "VsphereIntegratedContainers" - VicProductDescription = "VIC product" - VicProductType = "VirtualMachine" - ProductVMTag = "ProductVM" - ProductVMDescription = "Product VM" -) - -func setupClient(ctx context.Context, sess *session.Session) (*tags.RestClient, error) { - endpoint, err := url.Parse(sess.Service) - client := tags.NewClient(endpoint, sess.Insecure, sess.Thumbprint) - err = client.Login(ctx) - if err != nil { - log.Errorf("failed to connect rest API for %s", errors.ErrorStack(err)) - return client, errors.Errorf("Rest is not accessible") - } - - return client, nil -} - -func createProductVMtag(ctx context.Context, client *tags.RestClient) (string, error) { - // create category first, then create tag - categoryID, err := client.CreateCategoryIfNotExist(ctx, VicProductCategory, VicProductDescription, VicProductType, false) - if err != nil { - return "", errors.Errorf("failed to create vic product category: %s", errors.ErrorStack(err)) - } - - tagID, err := client.CreateTagIfNotExist(ctx, ProductVMTag, ProductVMDescription, *categoryID) - if err != nil { - return "", errors.Errorf("failed to create product vm tag: %s", errors.ErrorStack(err)) - } - - return *tagID, nil -} - -func attachTag(ctx context.Context, client *tags.RestClient, sess *session.Session, tagID string, vm *object.VirtualMachine) error { - if tagID == "" || sess == nil { - return errors.Errorf("failed to attach product vm tag") - } - - err := client.AttachTagToObject(ctx, tagID, vm.Reference().Value, vm.Reference().Type) - if err != nil { - return errors.Errorf("failed to apply the tag on product vm : %s", errors.ErrorStack(err)) - } - - 
log.Debugf("successfully attached the product tag") - return nil -} - -// Run takes in a url and session and tag the ova vm. -func Run(ctx context.Context, sess *session.Session) error { - client, err := setupClient(ctx, sess) - if err != nil { - return err - } - - tagID, err := createProductVMtag(ctx, client) - if err != nil { - return err - } - - vm, err := guest.GetSelf(ctx, sess) - if err != nil { - return errors.Errorf("failed to get product vm : %s", errors.ErrorStack(err)) - } - - err = attachTag(ctx, client, sess, tagID, vm) - if err != nil { - return err - } - - return nil -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/.gitignore b/installer/vendor/github.com/GoASTScanner/gas/.gitignore deleted file mode 100644 index b0326bdbf8..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -*.swp -gas - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -.DS_Store - -.vscode diff --git a/installer/vendor/github.com/GoASTScanner/gas/.travis.yml b/installer/vendor/github.com/GoASTScanner/gas/.travis.yml deleted file mode 100644 index 2c253a756e..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go -before_script: - - go vet $(go list ./... | grep -v /vendor/) -go: - - 1.5 - - tip diff --git a/installer/vendor/github.com/GoASTScanner/gas/Dockerfile b/installer/vendor/github.com/GoASTScanner/gas/Dockerfile deleted file mode 100644 index 7fea92ca2f..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# Docker version must be 17.05 or higher to allow multistage build -# See build and run instructions in README.md - -# Builds Gas for utilization -FROM golang:1.8.1-alpine as builder -ENV workspace /go/src/github.com/GoASTScanner/gas -ENV GOPATH /go -COPY . $workspace -WORKDIR $workspace - -RUN go vet $(go list ./... | grep -v /vendor/) -RUN CGO_ENABLED=0 go build -o gas . - -######################################################## - -# Runs Gas on all Go files in the current directory when -# 'docker run' command in README is given -FROM alpine:3.6 - -COPY --from=builder /go/src/github.com/GoASTScanner/gas/gas / - -# Mounted directory should be placed into the workdir -CMD /gas $(find . -path ./vendor -prune -o -type f -name "*.go") diff --git a/installer/vendor/github.com/GoASTScanner/gas/README.md b/installer/vendor/github.com/GoASTScanner/gas/README.md deleted file mode 100644 index 81c2ed5e3f..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/README.md +++ /dev/null @@ -1,131 +0,0 @@ - - -## GAS - Go AST Scanner - -Inspects source code for security problems by scanning the Go AST. - -### License - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License [here](http://www.apache.org/licenses/LICENSE-2.0). 
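Editor's note on the installer/tagvm/tagvm.go removal above: the deleted package is what tagged the product VM through the vSphere tags REST client. For review convenience, here is a condensed sketch of the flow being dropped; it is distilled from the removed file (function name shortened, logging and error wrapping trimmed), not new behavior introduced by this patch.

```go
package tagvm

import (
	"context"
	"net/url"

	// import paths as written in the removed file
	"github.com/vmware/vic/lib/guest"
	"github.com/vmware/vic/pkg/vsphere/session"
	"github.com/vmware/vic/pkg/vsphere/tags"
)

// tagProductVM condenses the removed Run(): create the VIC category and tag if
// they do not already exist, then attach the tag to the VM the installer runs in.
func tagProductVM(ctx context.Context, sess *session.Session) error {
	endpoint, err := url.Parse(sess.Service)
	if err != nil {
		return err
	}
	client := tags.NewClient(endpoint, sess.Insecure, sess.Thumbprint)
	if err := client.Login(ctx); err != nil {
		return err
	}

	categoryID, err := client.CreateCategoryIfNotExist(ctx, "VsphereIntegratedContainers", "VIC product", "VirtualMachine", false)
	if err != nil {
		return err
	}
	tagID, err := client.CreateTagIfNotExist(ctx, "ProductVM", "Product VM", *categoryID)
	if err != nil {
		return err
	}

	vm, err := guest.GetSelf(ctx, sess)
	if err != nil {
		return err
	}
	return client.AttachTagToObject(ctx, *tagID, vm.Reference().Value, vm.Reference().Type)
}
```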
- -### Project status - -[![Build Status](https://travis-ci.org/GoASTScanner/gas.svg?branch=master)](https://travis-ci.org/GoASTScanner/gas) -[![GoDoc](https://godoc.org/github.com/GoASTScanner/gas?status.svg)](https://godoc.org/github.com/GoASTScanner/gas) - -Gas is still in alpha and accepting feedback from early adopters. We do -not consider it production ready at this time. - -### Usage - -Gas can be configured to only run a subset of rules, to exclude certain file -paths, and produce reports in different formats. By default all rules will be -run against the supplied input files. To recursively scan from the current -directory you can supply './...' as the input argument. - -#### Selecting rules - -By default Gas will run all rules against the supplied file paths. It is however possible to select a subset of rules to run via the '-include=' flag, -or to specify a set of rules to explicitly exclude using the '-exclude=' flag. - -##### Available rules - - - G101: Look for hardcoded credentials - - G102: Bind to all interfaces - - G103: Audit the use of unsafe block - - G104: Audit errors not checked - - G105: Audit the use of math/big.Int.Exp - - G201: SQL query construction using format string - - G202: SQL query construction using string concatenation - - G203: Use of unescaped data in HTML templates - - G204: Audit use of command execution - - G301: Poor file permissions used when creating a directory - - G302: Poor file permisions used with chmod - - G303: Creating tempfile using a predictable path - - G401: Detect the usage of DES, RC4, or MD5 - - G402: Look for bad TLS connection settings - - G403: Ensure minimum RSA key length of 2048 bits - - G404: Insecure random number source (rand) - - G501: Import blacklist: crypto/md5 - - G502: Import blacklist: crypto/des - - G503: Import blacklist: crypto/rc4 - - G504: Import blacklist: net/http/cgi - - -``` -# Run a specific set of rules -$ gas -include=G101,G203,G401 ./... - -# Run everything except for rule G303 -$ gas -exclude=G303 ./... -``` - -#### Excluding files: - -Gas can be told to \ignore paths that match a supplied pattern using the 'skip' command line option. This is -accomplished via [go-glob](github.com/ryanuber/go-glob). Multiple patterns can be specified as follows: - -``` -$ gas -skip=tests* -skip=*_example.go ./... -``` - -#### Annotating code - -As with all automated detection tools there will be cases of false positives. In cases where Gas reports a failure that has been manually verified as being safe it is possible to annotate the code with a '#nosec' comment. - -The annotation causes Gas to stop processing any further nodes within the -AST so can apply to a whole block or more granularly to a single expression. - -```go - -import "md5" // #nosec - - -func main(){ - - /* #nosec */ - if x > y { - h := md5.New() // this will also be ignored - } - -} - -``` - -In some cases you may also want to revisit places where #nosec annotations -have been used. To run the scanner and ignore any #nosec annotations you -can do the following: - -``` -$ gas -nosec=true ./... -``` - -### Output formats - -Gas currently supports text, json and csv output formats. By default -results will be reported to stdout, but can also be written to an output -file. 
The output format is controlled by the '-fmt' flag, and the output file is controlled by the '-out' flag as follows: - -``` -# Write output in json format to results.json -$ gas -fmt=json -out=results.json *.go -``` - -### Docker container - -A Dockerfile is included with the Gas source code to provide a container that -allows users to easily run Gas on their code. It builds Gas, then runs it on -all Go files in your current directory. Use the following commands to build -and run locally: - -To build: (run command in cloned Gas source code directory) - docker build --build-arg http_proxy --build-arg https_proxy - --build-arg no_proxy -t goastscanner/gas:latest . - -To run: (run command in desired directory with Go files) - docker run -v $PWD:$PWD --workdir $PWD goastscanner/gas:latest - -Note: Docker version 17.05 or later is required (to permit multistage build). -``` diff --git a/installer/vendor/github.com/GoASTScanner/gas/core/call_list_test.go b/installer/vendor/github.com/GoASTScanner/gas/core/call_list_test.go deleted file mode 100644 index ef582937bd..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/core/call_list_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package core - -import ( - "go/ast" - "testing" -) - -type callListRule struct { - MetaData - callList CallList - matched int -} - -func (r *callListRule) Match(n ast.Node, c *Context) (gi *Issue, err error) { - if r.callList.ContainsCallExpr(n, c) { - r.matched += 1 - } - return nil, nil -} - -func TestCallListContainsCallExpr(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := NewAnalyzer(config, nil) - calls := NewCallList() - calls.AddAll("bytes.Buffer", "Write", "WriteTo") - rule := &callListRule{ - MetaData: MetaData{ - Severity: Low, - Confidence: Low, - What: "A dummy rule", - }, - callList: calls, - matched: 0, - } - analyzer.AddRule(rule, []ast.Node{(*ast.CallExpr)(nil)}) - source := ` - package main - import ( - "bytes" - "fmt" - ) - func main() { - var b bytes.Buffer - b.Write([]byte("Hello ")) - fmt.Fprintf(&b, "world!") - }` - - analyzer.ProcessSource("dummy.go", source) - if rule.matched != 1 { - t.Errorf("Expected to match a bytes.Buffer.Write call") - } -} - -func TestCallListContains(t *testing.T) { - callList := NewCallList() - callList.Add("fmt", "Printf") - if !callList.Contains("fmt", "Printf") { - t.Errorf("Expected call list to contain fmt.Printf") - } -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/core/helpers_test.go b/installer/vendor/github.com/GoASTScanner/gas/core/helpers_test.go deleted file mode 100644 index 1a7bcdad28..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/core/helpers_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package core - -import ( - "go/ast" - "testing" -) - -type dummyCallback func(ast.Node, *Context, string, ...string) (*ast.CallExpr, bool) - -type dummyRule struct { - MetaData - pkgOrType string - funcsOrMethods []string - callback dummyCallback - callExpr []ast.Node - matched int -} - -func (r *dummyRule) Match(n ast.Node, c *Context) (gi *Issue, err error) { - if callexpr, matched := r.callback(n, c, r.pkgOrType, r.funcsOrMethods...); matched { - r.matched += 1 - r.callExpr = append(r.callExpr, callexpr) - } - return nil, nil -} - -func TestMatchCallByType(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := NewAnalyzer(config, nil) - rule := &dummyRule{ - MetaData: MetaData{ - Severity: Low, - Confidence: Low, - What: "A dummy rule", - }, - pkgOrType: 
"bytes.Buffer", - funcsOrMethods: []string{"Write"}, - callback: MatchCallByType, - callExpr: []ast.Node{}, - matched: 0, - } - analyzer.AddRule(rule, []ast.Node{(*ast.CallExpr)(nil)}) - source := ` - package main - import ( - "bytes" - "fmt" - ) - func main() { - var b bytes.Buffer - b.Write([]byte("Hello ")) - fmt.Fprintf(&b, "world!") - }` - - analyzer.ProcessSource("dummy.go", source) - if rule.matched != 1 || len(rule.callExpr) != 1 { - t.Errorf("Expected to match a bytes.Buffer.Write call") - } - - typeName, callName, err := GetCallInfo(rule.callExpr[0], analyzer.context) - if err != nil { - t.Errorf("Unable to resolve call info: %v\n", err) - } - if typeName != "bytes.Buffer" { - t.Errorf("Expected: %s, Got: %s\n", "bytes.Buffer", typeName) - } - if callName != "Write" { - t.Errorf("Expected: %s, Got: %s\n", "Write", callName) - } - -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/filelist_test.go b/installer/vendor/github.com/GoASTScanner/gas/filelist_test.go deleted file mode 100644 index eaa3cd63cb..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/filelist_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package main - -import ( - "reflect" - "testing" -) - -func Test_newFileList(t *testing.T) { - type args struct { - paths []string - } - tests := []struct { - name string - args args - want *fileList - }{ - { - name: "nil paths", - args: args{paths: nil}, - want: &fileList{patterns: map[string]struct{}{}}, - }, - { - name: "empty paths", - args: args{paths: []string{}}, - want: &fileList{patterns: map[string]struct{}{}}, - }, - { - name: "have paths", - args: args{paths: []string{"*_test.go"}}, - want: &fileList{patterns: map[string]struct{}{ - "*_test.go": struct{}{}, - }}, - }, - } - for _, tt := range tests { - if got := newFileList(tt.args.paths...); !reflect.DeepEqual(got, tt.want) { - t.Errorf("%q. newFileList() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_fileList_String(t *testing.T) { - type fields struct { - patterns []string - } - tests := []struct { - name string - fields fields - want string - }{ - { - name: "nil patterns", - fields: fields{patterns: nil}, - want: "", - }, - { - name: "empty patterns", - fields: fields{patterns: []string{}}, - want: "", - }, - { - name: "one pattern", - fields: fields{patterns: []string{"foo"}}, - want: "foo", - }, - { - name: "two patterns", - fields: fields{patterns: []string{"bar", "foo"}}, - want: "bar, foo", - }, - } - for _, tt := range tests { - f := newFileList(tt.fields.patterns...) - if got := f.String(); got != tt.want { - t.Errorf("%q. 
fileList.String() = %v, want %v", tt.name, got, tt.want) - } - } -} - -func Test_fileList_Set(t *testing.T) { - type fields struct { - patterns []string - } - type args struct { - path string - } - tests := []struct { - name string - fields fields - args args - want map[string]struct{} - wantErr bool - }{ - { - name: "add empty path", - fields: fields{patterns: nil}, - args: args{path: ""}, - want: map[string]struct{}{}, - wantErr: false, - }, - { - name: "add path to nil patterns", - fields: fields{patterns: nil}, - args: args{path: "foo"}, - want: map[string]struct{}{ - "foo": struct{}{}, - }, - wantErr: false, - }, - { - name: "add path to empty patterns", - fields: fields{patterns: []string{}}, - args: args{path: "foo"}, - want: map[string]struct{}{ - "foo": struct{}{}, - }, - wantErr: false, - }, - { - name: "add path to populated patterns", - fields: fields{patterns: []string{"foo"}}, - args: args{path: "bar"}, - want: map[string]struct{}{ - "foo": struct{}{}, - "bar": struct{}{}, - }, - wantErr: false, - }, - } - for _, tt := range tests { - f := newFileList(tt.fields.patterns...) - if err := f.Set(tt.args.path); (err != nil) != tt.wantErr { - t.Errorf("%q. fileList.Set() error = %v, wantErr %v", tt.name, err, tt.wantErr) - } - if !reflect.DeepEqual(f.patterns, tt.want) { - t.Errorf("%q. got state fileList.patterns = %v, want state %v", tt.name, f.patterns, tt.want) - } - } -} - -func Test_fileList_Contains(t *testing.T) { - type fields struct { - patterns []string - } - type args struct { - path string - } - tests := []struct { - name string - fields fields - args args - want bool - }{ - { - name: "nil patterns", - fields: fields{patterns: nil}, - args: args{path: "foo"}, - want: false, - }, - { - name: "empty patterns", - fields: fields{patterns: nil}, - args: args{path: "foo"}, - want: false, - }, - { - name: "one pattern, no wildcard, no match", - fields: fields{patterns: []string{"foo"}}, - args: args{path: "bar"}, - want: false, - }, - { - name: "one pattern, no wildcard, match", - fields: fields{patterns: []string{"foo"}}, - args: args{path: "foo"}, - want: true, - }, - { - name: "one pattern, wildcard prefix, match", - fields: fields{patterns: []string{"*foo"}}, - args: args{path: "foo"}, - want: true, - }, - { - name: "one pattern, wildcard suffix, match", - fields: fields{patterns: []string{"foo*"}}, - args: args{path: "foo"}, - want: true, - }, - { - name: "one pattern, wildcard both ends, match", - fields: fields{patterns: []string{"*foo*"}}, - args: args{path: "foo"}, - want: true, - }, - { - name: "default test match 1", - fields: fields{patterns: []string{"*_test.go"}}, - args: args{path: "foo_test.go"}, - want: true, - }, - { - name: "default test match 2", - fields: fields{patterns: []string{"*_test.go"}}, - args: args{path: "bar/foo_test.go"}, - want: true, - }, - { - name: "default test match 3", - fields: fields{patterns: []string{"*_test.go"}}, - args: args{path: "/bar/foo_test.go"}, - want: true, - }, - { - name: "default test match 4", - fields: fields{patterns: []string{"*_test.go"}}, - args: args{path: "baz/bar/foo_test.go"}, - want: true, - }, - { - name: "default test match 5", - fields: fields{patterns: []string{"*_test.go"}}, - args: args{path: "/baz/bar/foo_test.go"}, - want: true, - }, - { - name: "many patterns, no match", - fields: fields{patterns: []string{"*_one.go", "*_two.go"}}, - args: args{path: "/baz/bar/foo_test.go"}, - want: false, - }, - { - name: "many patterns, match", - fields: fields{patterns: []string{"*_one.go", "*_two.go", 
"*_test.go"}}, - args: args{path: "/baz/bar/foo_test.go"}, - want: true, - }, - { - name: "sub-folder, match", - fields: fields{patterns: []string{"vendor"}}, - args: args{path: "/baz/vendor/bar/foo_test.go"}, - want: true, - }, - } - for _, tt := range tests { - f := newFileList(tt.fields.patterns...) - if got := f.Contains(tt.args.path); got != tt.want { - t.Errorf("%q. fileList.Contains() = %v, want %v", tt.name, got, tt.want) - } - } -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/main_test.go b/installer/vendor/github.com/GoASTScanner/gas/main_test.go deleted file mode 100644 index b47a4b5183..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/main_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import "testing" - -func Test_shouldInclude(t *testing.T) { - type args struct { - path string - excluded *fileList - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "non .go file", - args: args{ - path: "thing.txt", - excluded: newFileList(), - }, - want: false, - }, - { - name: ".go file, not excluded", - args: args{ - path: "thing.go", - excluded: newFileList(), - }, - want: true, - }, - { - name: ".go file, excluded", - args: args{ - path: "thing.go", - excluded: newFileList("thing.go"), - }, - want: false, - }, - } - for _, tt := range tests { - if got := shouldInclude(tt.args.path, tt.args.excluded); got != tt.want { - t.Errorf("%q. shouldInclude() = %v, want %v", tt.name, got, tt.want) - } - } -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/big_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/big_test.go deleted file mode 100644 index b533e66bba..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/big_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestBigExp(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewUsingBigExp(config)) - - issues := gasTestRunner(` - package main - - import ( - "math/big" - ) - - func main() { - z := new(big.Int) - x := new(big.Int) - x = x.SetUint64(2) - y := new(big.Int) - y = y.SetUint64(4) - m := new(big.Int) - m = m.SetUint64(0) - - z = z.Exp(x, y, m) - } - `, analyzer) - - checkTestResults(t, issues, 1, "Use of math/big.Int.Exp function") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/bind_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/bind_test.go deleted file mode 100644 index 16bc389900..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/bind_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestBind0000(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBindsToAllNetworkInterfaces(config)) - - issues := gasTestRunner(` - package main - import ( - "log" - "net" - ) - func main() { - l, err := net.Listen("tcp", "0.0.0.0:2000") - if err != nil { - log.Fatal(err) - } - defer l.Close() - }`, analyzer) - - checkTestResults(t, issues, 1, "Binds to all network interfaces") -} - -func TestBindEmptyHost(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBindsToAllNetworkInterfaces(config)) - - issues := gasTestRunner(` - package main - import ( - "log" - "net" - ) - func main() { - l, err := net.Listen("tcp", ":2000") - if err != nil { - log.Fatal(err) - } - defer l.Close() - }`, analyzer) - - checkTestResults(t, issues, 1, "Binds to all network interfaces") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/blacklist_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/blacklist_test.go deleted file mode 100644 index 110afd4a10..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/blacklist_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - gas "github.com/GoASTScanner/gas/core" - "testing" -) - -const initOnlyImportSrc = ` -package main -import ( - _ "crypto/md5" - "fmt" - "os" -) -func main() { - for _, arg := range os.Args { - fmt.Println(arg) - } -}` - -func TestInitOnlyImport(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBlacklist_crypto_md5(config)) - issues := gasTestRunner(initOnlyImportSrc, analyzer) - checkTestResults(t, issues, 0, "") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/errors_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/errors_test.go deleted file mode 100644 index a0d82c9617..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/errors_test.go +++ /dev/null @@ -1,144 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestErrorsMulti(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewNoErrorCheck(config)) - - issues := gasTestRunner( - `package main - - import ( - "fmt" - ) - - func test() (int,error) { - return 0, nil - } - - func main() { - v, _ := test() - fmt.Println(v) - }`, analyzer) - - checkTestResults(t, issues, 1, "Errors unhandled") -} - -func TestErrorsSingle(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewNoErrorCheck(config)) - - issues := gasTestRunner( - `package main - - import ( - "fmt" - ) - - func a() error { - return fmt.Errorf("This is an error") - } - - func b() { - fmt.Println("b") - } - - func c() string { - return fmt.Sprintf("This isn't anything") - } - - func main() { - _ = a() - a() - b() - _ = c() - c() - }`, analyzer) - checkTestResults(t, issues, 2, "Errors unhandled") -} - -func TestErrorsGood(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewNoErrorCheck(config)) - - issues := gasTestRunner( - `package main - - import ( - "fmt" - ) - - func test() err error { - return 0, nil - } - - func main() { - e := test() - }`, analyzer) - - checkTestResults(t, issues, 0, "") -} - -func TestErrorsWhitelisted(t *testing.T) { - config := map[string]interface{}{ - "ignoreNosec": false, - "G104": map[string][]string{ - "compress/zlib": []string{"NewReader"}, - "io": []string{"Copy"}, - }, - } - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewNoErrorCheck(config)) - source := `package main - import ( - "io" - "os" - "fmt" - "bytes" - "compress/zlib" - ) - - func a() error { - return fmt.Errorf("This is an error ok") - } - - func main() { - // Expect at least one failure - _ = a() - - var b bytes.Buffer - // Default whitelist - nbytes, _ := 
b.Write([]byte("Hello ")) - if nbytes <= 0 { - os.Exit(1) - } - - // Whitelisted via configuration - r, _ := zlib.NewReader(&b) - io.Copy(os.Stdout, r) - }` - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "Errors unhandled") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/fileperms_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/fileperms_test.go deleted file mode 100644 index 278c29e2b0..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/fileperms_test.go +++ /dev/null @@ -1,56 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestChmod(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewFilePerms(config)) - - issues := gasTestRunner(` - package main - import "os" - func main() { - os.Chmod("/tmp/somefile", 0777) - os.Chmod("/tmp/someotherfile", 0600) - os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0666) - os.OpenFile("/tmp/thing", os.O_CREATE|os.O_WRONLY, 0600) - }`, analyzer) - - checkTestResults(t, issues, 2, "Expect file permissions") -} - -func TestMkdir(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewMkdirPerms(config)) - - issues := gasTestRunner(` - package main - import "os" - func main() { - os.Mkdir("/tmp/mydir", 0777) - os.Mkdir("/tmp/mydir", 0600) - os.MkdirAll("/tmp/mydir/mysubidr", 0775) - }`, analyzer) - - checkTestResults(t, issues, 2, "Expect directory permissions") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials_test.go deleted file mode 100644 index 63f3db1378..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/hardcoded_credentials_test.go +++ /dev/null @@ -1,194 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestHardcoded(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner( - ` - package samples - - import "fmt" - - func main() { - username := "admin" - password := "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardcodedWithEntropy(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner( - ` - package samples - - import "fmt" - - func main() { - username := "admin" - password := "secret" - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 0, "Potential hardcoded credentials") -} - -func TestHardcodedIgnoreEntropy(t *testing.T) { - config := map[string]interface{}{ - "ignoreNosec": false, - "G101": map[string]string{ - "ignore_entropy": "true", - }, - } - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner( - ` - package samples - - import "fmt" - - func main() { - username := "admin" - password := "admin" - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardcodedGlobalVar(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner(` - package samples - - import "fmt" - - var password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - - func main() { - username := "admin" - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardcodedConstant(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner(` - package samples - - import "fmt" - - const password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - - func main() { - username := "admin" - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardcodedConstantMulti(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - - issues := gasTestRunner(` - package samples - - import "fmt" - - const ( - username = "user" - password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - ) - - func main() { - fmt.Println("Doing something with: ", username, password) - }`, analyzer) - - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardecodedVarsNotAssigned(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - issues := gasTestRunner(` - package main - var password string - func init() { - password = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - }`, analyzer) - 
checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} - -func TestHardcodedConstInteger(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - issues := gasTestRunner(` - package main - - const ( - ATNStateSomethingElse = 1 - ATNStateTokenStart = 42 - ) - func main() { - println(ATNStateTokenStart) - }`, analyzer) - checkTestResults(t, issues, 0, "Potential hardcoded credentials") -} - -func TestHardcodedConstString(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewHardcodedCredentials(config)) - issues := gasTestRunner(` - package main - - const ( - ATNStateTokenStart = "f62e5bcda4fae4f82370da0c6f20697b8f8447ef" - ) - func main() { - println(ATNStateTokenStart) - }`, analyzer) - checkTestResults(t, issues, 1, "Potential hardcoded credentials") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/httpoxy_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/httpoxy_test.go deleted file mode 100644 index b666fdfeb8..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/httpoxy_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestHttpoxy(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBlacklist_net_http_cgi(config)) - - issues := gasTestRunner(` - package main - import ( - "net/http/cgi" - "net/http" - ) - func main() { - cgi.Serve(http.FileServer(http.Dir("/usr/share/doc"))) - }`, analyzer) - - checkTestResults(t, issues, 1, "Go versions < 1.6.3 are vulnerable to Httpoxy") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/nosec_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/nosec_test.go deleted file mode 100644 index dbbc107272..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/nosec_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestNosec(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner( - `package main - import ( - "os" - "os/exec" - ) - - func main() { - cmd := exec.Command("sh", "-c", os.Getenv("BLAH")) // #nosec - cmd.Run() - }`, analyzer) - - checkTestResults(t, issues, 0, "None") -} - -func TestNosecBlock(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner( - `package main - import ( - "os" - "os/exec" - ) - - func main() { - // #nosec - if true { - cmd := exec.Command("sh", "-c", os.Getenv("BLAH")) - cmd.Run() - } - }`, analyzer) - - checkTestResults(t, issues, 0, "None") -} - -func TestNosecIgnore(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": true} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner( - `package main - - import ( - "os" - "os/exec" - ) - - func main() { - cmd := exec.Command("sh", "-c", os.Args[1]) // #nosec - cmd.Run() - }`, analyzer) - - checkTestResults(t, issues, 1, "Subprocess launching with variable.") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/rand_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/rand_test.go deleted file mode 100644 index d6de104408..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/rand_test.go +++ /dev/null @@ -1,85 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestRandOk(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewWeakRandCheck(config)) - - issues := gasTestRunner( - ` - package main - - import "crypto/rand" - - func main() { - good, _ := rand.Read(nil) - println(good) - }`, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} - -func TestRandBad(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewWeakRandCheck(config)) - - issues := gasTestRunner( - ` - package main - - import "math/rand" - - func main() { - bad := rand.Int() - println(bad) - - }`, analyzer) - - checkTestResults(t, issues, 1, "Use of weak random number generator (math/rand instead of crypto/rand)") -} - -func TestRandRenamed(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewWeakRandCheck(config)) - - issues := gasTestRunner( - ` - package main - - import ( - "crypto/rand" - mrand "math/rand" - ) - - - func main() { - good, _ := rand.Read(nil) - println(good) - i := mrand.Int31() - println(i) - }`, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/rsa_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/rsa_test.go deleted file mode 100644 index 9b0b47b6ca..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/rsa_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestRSAKeys(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewWeakKeyStrength(config)) - - issues := gasTestRunner( - `package main - - import ( - "crypto/rand" - "crypto/rsa" - "fmt" - ) - - func main() { - - //Generate Private Key - pvk, err := rsa.GenerateKey(rand.Reader, 1024) - - if err != nil { - fmt.Println(err) - } - fmt.Println(pvk) - - }`, analyzer) - - checkTestResults(t, issues, 1, "RSA keys should") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/sql_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/sql_test.go deleted file mode 100644 index 8919f7ab2a..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/sql_test.go +++ /dev/null @@ -1,216 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestSQLInjectionViaConcatenation(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrConcat(config)) - - source := ` - package main - import ( - "database/sql" - //_ "github.com/mattn/go-sqlite3" - "os" - ) - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - rows, err := db.Query("SELECT * FROM foo WHERE name = " + os.Args[1]) - if err != nil { - panic(err) - } - defer rows.Close() - } - ` - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "SQL string concatenation") -} - -func TestSQLInjectionViaIntepolation(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrFormat(config)) - - source := ` - package main - import ( - "database/sql" - "fmt" - "os" - //_ "github.com/mattn/go-sqlite3" - ) - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - q := fmt.Sprintf("SELECT * FROM foo where name = '%s'", os.Args[1]) - rows, err := db.Query(q) - if err != nil { - panic(err) - } - defer rows.Close() - } - ` - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "SQL string formatting") -} - -func TestSQLInjectionFalsePositiveA(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrConcat(config)) - analyzer.AddRule(NewSqlStrFormat(config)) - - source := ` - - package main - import ( - "database/sql" - //_ "github.com/mattn/go-sqlite3" - ) - - var staticQuery = "SELECT * FROM foo WHERE age < 32" - - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - rows, err := db.Query(staticQuery) - if err != nil { - panic(err) - } - defer rows.Close() - } - - ` - issues := gasTestRunner(source, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} - -func TestSQLInjectionFalsePositiveB(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrConcat(config)) - analyzer.AddRule(NewSqlStrFormat(config)) - - source := ` - - package main - import ( - "database/sql" - //_ "github.com/mattn/go-sqlite3" - ) - - var staticQuery = "SELECT * FROM foo WHERE age < 32" - - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - rows, err := db.Query(staticQuery) - if err != nil { - panic(err) - } - defer rows.Close() - } - - ` - issues := gasTestRunner(source, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} - -func TestSQLInjectionFalsePositiveC(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrConcat(config)) - analyzer.AddRule(NewSqlStrFormat(config)) - - source := ` - - package main - import ( - 
"database/sql" - //_ "github.com/mattn/go-sqlite3" - ) - - var staticQuery = "SELECT * FROM foo WHERE age < " - - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - rows, err := db.Query(staticQuery + "32") - if err != nil { - panic(err) - } - defer rows.Close() - } - - ` - issues := gasTestRunner(source, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} - -func TestSQLInjectionFalsePositiveD(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSqlStrConcat(config)) - analyzer.AddRule(NewSqlStrFormat(config)) - - source := ` - - package main - import ( - "database/sql" - //_ "github.com/mattn/go-sqlite3" - ) - - const age = "32" - var staticQuery = "SELECT * FROM foo WHERE age < " - - func main(){ - db, err := sql.Open("sqlite3", ":memory:") - if err != nil { - panic(err) - } - rows, err := db.Query(staticQuery + age) - if err != nil { - panic(err) - } - defer rows.Close() - } - - ` - issues := gasTestRunner(source, analyzer) - - checkTestResults(t, issues, 0, "Not expected to match") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/subproc_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/subproc_test.go deleted file mode 100644 index 13c79df454..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/subproc_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestSubprocess(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner(` - package main - - import ( - "log" - "os/exec" - ) - - func main() { - val := "/bin/" + "sleep" - cmd := exec.Command(val, "5") - err := cmd.Start() - if err != nil { - log.Fatal(err) - } - log.Printf("Waiting for command to finish...") - err = cmd.Wait() - log.Printf("Command finished with error: %v", err) - }`, analyzer) - - checkTestResults(t, issues, 1, "Subprocess launching should be audited.") -} - -func TestSubprocessVar(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner(` - package main - - import ( - "log" - "os" - "os/exec" - ) - - func main() { - run := "sleep" + os.Getenv("SOMETHING") - cmd := exec.Command(run, "5") - err := cmd.Start() - if err != nil { - log.Fatal(err) - } - log.Printf("Waiting for command to finish...") - err = cmd.Wait() - log.Printf("Command finished with error: %v", err) - }`, analyzer) - - checkTestResults(t, issues, 1, "Subprocess launching with variable.") -} - -func TestSubprocessPath(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner(` - package main - - import ( - "log" - "os/exec" - ) - - func main() { - cmd := exec.Command("sleep", "5") - err := cmd.Start() - if err != nil { - log.Fatal(err) - } - log.Printf("Waiting for command to finish...") - err = cmd.Wait() - log.Printf("Command finished with error: %v", err) - }`, analyzer) - - checkTestResults(t, issues, 1, "Subprocess launching with partial path.") -} - -func TestSubprocessSyscall(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewSubproc(config)) - - issues := gasTestRunner(` - package main - - import ( - "syscall" - ) - - func main() { - syscall.Exec("/bin/cat", []string{ "/etc/passwd" }, nil) - }`, analyzer) - - checkTestResults(t, issues, 1, "Subprocess launching should be audited.") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/tempfiles_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/tempfiles_test.go deleted file mode 100644 index 51709e8fbb..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/tempfiles_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestTempfiles(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBadTempFile(config)) - - source := ` - package samples - - import ( - "io/ioutil" - "os" - ) - - func main() { - - file1, _ := os.Create("/tmp/demo1") - defer file1.Close() - - ioutil.WriteFile("/tmp/demo2", []byte("This is some data"), 0644) - } - ` - - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 2, "shared tmp directory") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/templates_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/templates_test.go deleted file mode 100644 index 83dccf1f38..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/templates_test.go +++ /dev/null @@ -1,136 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestTemplateCheckSafe(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewTemplateCheck(config)) - - source := ` - package samples - - import ( - "html/template" - "os" - ) - - const tmpl = "" - - func main() { - t := template.Must(template.New("ex").Parse(tmpl)) - v := map[string]interface{}{ - "Title": "Test World", - "Body": template.HTML(""), - } - t.Execute(os.Stdout, v) - }` - - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 0, "this method will not auto-escape HTML. Verify data is well formed") -} - -func TestTemplateCheckBadHTML(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewTemplateCheck(config)) - - source := ` - package samples - - import ( - "html/template" - "os" - ) - - const tmpl = "" - - func main() { - a := "something from another place" - t := template.Must(template.New("ex").Parse(tmpl)) - v := map[string]interface{}{ - "Title": "Test World", - "Body": template.HTML(a), - } - t.Execute(os.Stdout, v) - }` - - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "this method will not auto-escape HTML. 
Verify data is well formed") -} - -func TestTemplateCheckBadJS(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewTemplateCheck(config)) - - source := ` - package samples - - import ( - "html/template" - "os" - ) - - const tmpl = "" - - func main() { - a := "something from another place" - t := template.Must(template.New("ex").Parse(tmpl)) - v := map[string]interface{}{ - "Title": "Test World", - "Body": template.JS(a), - } - t.Execute(os.Stdout, v) - }` - - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "this method will not auto-escape HTML. Verify data is well formed") -} - -func TestTemplateCheckBadURL(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewTemplateCheck(config)) - - source := ` - package samples - - import ( - "html/template" - "os" - ) - - const tmpl = "" - - func main() { - a := "something from another place" - t := template.Must(template.New("ex").Parse(tmpl)) - v := map[string]interface{}{ - "Title": "Test World", - "Body": template.URL(a), - } - t.Execute(os.Stdout, v) - }` - - issues := gasTestRunner(source, analyzer) - checkTestResults(t, issues, 1, "this method will not auto-escape HTML. Verify data is well formed") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/tls_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/tls_test.go deleted file mode 100644 index 9b215a3cef..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/tls_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestInsecureSkipVerify(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewModernTlsCheck(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/tls" - "fmt" - "net/http" - ) - - func main() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") - if err != nil { - fmt.Println(err) - } - } - `, analyzer) - - checkTestResults(t, issues, 1, "TLS InsecureSkipVerify set true") -} - -func TestInsecureMinVersion(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewModernTlsCheck(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/tls" - "fmt" - "net/http" - ) - - func main() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{MinVersion: 0}, - } - client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") - if err != nil { - fmt.Println(err) - } - } - `, analyzer) - - checkTestResults(t, issues, 1, "TLS MinVersion too low") -} - -func TestInsecureMaxVersion(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewModernTlsCheck(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/tls" - "fmt" - "net/http" - ) - - func main() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{MaxVersion: 0}, - } - client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") - if err != nil { - fmt.Println(err) - } - } - `, analyzer) - - checkTestResults(t, issues, 1, "TLS MaxVersion too low") -} - -func TestInsecureCipherSuite(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewModernTlsCheck(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/tls" - "fmt" - "net/http" - ) - - func main() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{CipherSuites: []uint16{ - tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - },}, - } - client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") - if err != nil { - fmt.Println(err) - } - } - `, analyzer) - - checkTestResults(t, issues, 1, "TLS Bad Cipher Suite: TLS_RSA_WITH_RC4_128_SHA") -} - -func TestPreferServerCipherSuites(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewModernTlsCheck(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/tls" - "fmt" - "net/http" - ) - - func main() { - tr := &http.Transport{ - TLSClientConfig: &tls.Config{PreferServerCipherSuites: false}, - } - client := &http.Client{Transport: tr} - _, err := client.Get("https://golang.org/") - if err != nil { - fmt.Println(err) - } - } - `, analyzer) - - checkTestResults(t, issues, 1, "TLS PreferServerCipherSuites set false") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/unsafe_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/unsafe_test.go deleted file mode 100644 index f8d7787833..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/unsafe_test.go +++ /dev/null @@ -1,55 +0,0 
@@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestUnsafe(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewUsingUnsafe(config)) - - issues := gasTestRunner(` - package main - - import ( - "fmt" - "unsafe" - ) - - type Fake struct{} - - func (Fake) Good() {} - - func main() { - unsafeM := Fake{} - unsafeM.Good() - intArray := [...]int{1, 2} - fmt.Printf("\nintArray: %v\n", intArray) - intPtr := &intArray[0] - fmt.Printf("\nintPtr=%p, *intPtr=%d.\n", intPtr, *intPtr) - addressHolder := uintptr(unsafe.Pointer(intPtr)) + unsafe.Sizeof(intArray[0]) - intPtr = (*int)(unsafe.Pointer(addressHolder)) - fmt.Printf("\nintPtr=%p, *intPtr=%d.\n\n", intPtr, *intPtr) - } - `, analyzer) - - checkTestResults(t, issues, 3, "Use of unsafe calls") - -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/utils_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/utils_test.go deleted file mode 100644 index 48fa36f425..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/utils_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package rules - -import ( - "strings" - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func gasTestRunner(source string, analyzer gas.Analyzer) []*gas.Issue { - analyzer.ProcessSource("dummy.go", source) - return analyzer.Issues -} - -func checkTestResults(t *testing.T, issues []*gas.Issue, expected int, msg string) { - found := len(issues) - if found != expected { - t.Errorf("Found %d issues, expected %d", found, expected) - } - - for _, issue := range issues { - if !strings.Contains(issue.What, msg) { - t.Errorf("Unexpected issue identified: %s", issue.What) - } - } -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/rules/weakcrypto_test.go b/installer/vendor/github.com/GoASTScanner/gas/rules/weakcrypto_test.go deleted file mode 100644 index 1305c33c68..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/rules/weakcrypto_test.go +++ /dev/null @@ -1,114 +0,0 @@ -// (c) Copyright 2016 Hewlett Packard Enterprise Development LP -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package rules - -import ( - "testing" - - gas "github.com/GoASTScanner/gas/core" -) - -func TestMD5(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBlacklist_crypto_md5(config)) - analyzer.AddRule(NewUsesWeakCryptography(config)) - - issues := gasTestRunner(` - package main - import ( - "crypto/md5" - "fmt" - "os" - ) - func main() { - for _, arg := range os.Args { - fmt.Printf("%x - %s\n", md5.Sum([]byte(arg)), arg) - } - } - `, analyzer) - checkTestResults(t, issues, 2, "weak cryptographic") -} - -func TestDES(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBlacklist_crypto_des(config)) - analyzer.AddRule(NewUsesWeakCryptography(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/cipher" - "crypto/des" - "crypto/rand" - "encoding/hex" - "fmt" - "io" - ) - - func main() { - block, err := des.NewCipher([]byte("sekritz")) - if err != nil { - panic(err) - } - - plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ") - ciphertext := make([]byte, des.BlockSize+len(plaintext)) - iv := ciphertext[:des.BlockSize] - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - panic(err) - } - - stream := cipher.NewCFBEncrypter(block, iv) - stream.XORKeyStream(ciphertext[des.BlockSize:], plaintext) - fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext)) - } - `, analyzer) - - checkTestResults(t, issues, 2, "weak cryptographic") -} - -func TestRC4(t *testing.T) { - config := map[string]interface{}{"ignoreNosec": false} - analyzer := gas.NewAnalyzer(config, nil) - analyzer.AddRule(NewBlacklist_crypto_rc4(config)) - analyzer.AddRule(NewUsesWeakCryptography(config)) - - issues := gasTestRunner(` - package main - - import ( - "crypto/rc4" - "encoding/hex" - "fmt" - ) - - func main() { - cipher, err := rc4.NewCipher([]byte("sekritz")) - if err != nil { - panic(err) - } - 
- plaintext := []byte("I CAN HAZ SEKRIT MSG PLZ") - ciphertext := make([]byte, len(plaintext)) - cipher.XORKeyStream(ciphertext, plaintext) - fmt.Println("Secret message is: %s", hex.EncodeToString(ciphertext)) - } - `, analyzer) - - checkTestResults(t, issues, 2, "weak cryptographic") -} diff --git a/installer/vendor/github.com/GoASTScanner/gas/vendor.conf b/installer/vendor/github.com/GoASTScanner/gas/vendor.conf deleted file mode 100644 index 5f5b814f43..0000000000 --- a/installer/vendor/github.com/GoASTScanner/gas/vendor.conf +++ /dev/null @@ -1,7 +0,0 @@ -# package -github.com/GoAstScanner/gas - -# import -github.com/GoASTScanner/gas cc52ef5 -github.com/nbutton23/zxcvbn-go a22cb81 -github.com/ryanuber/go-glob v0.1 diff --git a/installer/vendor/github.com/Masterminds/semver/.travis.yml b/installer/vendor/github.com/Masterminds/semver/.travis.yml deleted file mode 100644 index fa92a5a326..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -language: go - -go: - - 1.6 - - 1.7 - - tip - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests. For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup - - make test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/installer/vendor/github.com/Masterminds/semver/CHANGELOG.md b/installer/vendor/github.com/Masterminds/semver/CHANGELOG.md deleted file mode 100644 index 25550675e9..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ /dev/null @@ -1,17 +0,0 @@ -# Release 1.x.x (xxxx-xx-xx) - -- Issue #9: Speed up version comparison performance (thanks @sdboyer) -- Issue #8: Added benchmarks (thanks @sdboyer) - -# Release 1.1.0 (2015-03-11) - -- Issue #2: Implemented validation to provide reasons a versions failed a - constraint. - -# Release 1.0.1 (2015-12-31) - -- Fixed #1: * constraint failing on valid versions. - -# Release 1.0.0 (2015-10-20) - -- Initial release diff --git a/installer/vendor/github.com/Masterminds/semver/Makefile b/installer/vendor/github.com/Masterminds/semver/Makefile deleted file mode 100644 index a7a1b4e36d..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -.PHONY: setup -setup: - go get -u gopkg.in/alecthomas/gometalinter.v1 - gometalinter.v1 --install - -.PHONY: test -test: validate lint - @echo "==> Running tests" - go test -v - -.PHONY: validate -validate: - @echo "==> Running static validations" - @gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1 - -.PHONY: lint -lint: - @echo "==> Running linters" - @gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... 
|| : diff --git a/installer/vendor/github.com/Masterminds/semver/README.md b/installer/vendor/github.com/Masterminds/semver/README.md deleted file mode 100644 index aa133eac57..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/README.md +++ /dev/null @@ -1,146 +0,0 @@ -# SemVer - -The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: - -* Parse semantic versions -* Sort semantic versions -* Check if a semantic version fits within a set of constraints -* Optionally work with a `v` prefix - -[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](http://goreportcard.com/badge/Masterminds/semver)](http://goreportcard.com/report/Masterminds/semver) - -## Parsing Semantic Versions - -To parse a semantic version use the `NewVersion` function. For example, - - v, err := semver.NewVersion("1.2.3-beta.1+build345") - -If there is an error the version wasn't parseable. The version object has methods -to get the parts of the version, compare it to other versions, convert the -version back into a string, and get the original string. For more details -please see the [documentation](https://godoc.org/github.com/Masterminds/semver). - -## Sorting Semantic Versions - -A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/) -package from the standard library. For example, - - raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} - vs := make([]*semver.Version, len(raw)) - for i, r := range raw { - v, err := semver.NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(semver.Collection(vs)) - -## Checking Version Constraints - -Checking a version against version constraints is one of the most featureful -parts of the package. - - c, err := semver.NewConstraint(">= 1.2.3") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - // Check if the version meets the constraints. The a variable will be true. - a := c.Check(v) - -## Basic Comparisons - -There are two elements to the comparisons. First, a comparison string is a list -of comma separated and comparisons. These are then separated by || separated or -comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a -comparison that's greater than or equal to 1.2 and less than 3.0.0 or is -greater than or equal to 4.2.3. - -The basic comparisons are: - -* `=`: equal (aliased to no operator) -* `!=`: not equal -* `>`: greater than -* `<`: less than -* `>=`: greater than or equal to -* `<=`: less than or equal to - -## Hyphen Range Comparisons - -There are multiple methods to handle ranges and the first is hyphens ranges. -These look like: - -* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` -* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5` - -## Wildcards In Comparisons - -The `x`, `X`, and `*` characters can be used as a wildcard character. This works -for all comparison operators. 
When used on the `=` operator it falls -back to the pack level comparison (see tilde below). For example, - -* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `>= 1.2.x` is equivalent to `>= 1.2.0` -* `<= 2.x` is equivalent to `<= 3` -* `*` is equivalent to `>= 0.0.0` - -## Tilde Range Comparisons (Patch) - -The tilde (`~`) comparison operator is for patch level ranges when a minor -version is specified and major level changes when the minor number is missing. -For example, - -* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` -* `~1` is equivalent to `>= 1, < 2` -* `~2.3` is equivalent to `>= 2.3, < 2.4` -* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` -* `~1.x` is equivalent to `>= 1, < 2` - -## Caret Range Comparisons (Major) - -The caret (`^`) comparison operator is for major level changes. This is useful -when comparisons of API versions as a major change is API breaking. For example, - -* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` -* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` -* `^2.3` is equivalent to `>= 2.3, < 3` -* `^2.x` is equivalent to `>= 2.0.0, < 3` - -# Validation - -In addition to testing a version against a constraint, a version can be validated -against a constraint. When validation fails a slice of errors containing why a -version didn't meet the constraint is returned. For example, - - c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") - if err != nil { - // Handle constraint not being parseable. - } - - v, _ := semver.NewVersion("1.3") - if err != nil { - // Handle version not being parseable. - } - - // Validate a version against a constraint. - a, msgs := c.Validate(v) - // a is false - for _, m := range msgs { - fmt.Println(m) - - // Loops over the errors which would read - // "1.3 is greater than 1.2.3" - // "1.3 is less than 1.4" - } - -# Contribute - -If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) -or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/installer/vendor/github.com/Masterminds/semver/appveyor.yml b/installer/vendor/github.com/Masterminds/semver/appveyor.yml deleted file mode 100644 index 08d6070875..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/appveyor.yml +++ /dev/null @@ -1,44 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\semver -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go version - - go env - - go get -u gopkg.in/alecthomas/gometalinter.v1 - - set PATH=%PATH%;%GOPATH%\bin - - gometalinter.v1.exe --install - -build_script: - - go install -v ./... - -test_script: - - "gometalinter.v1 \ - --disable-all \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || cmd /C EXIT 0" - - "gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... 
|| cmd /C EXIT 0" - - go test -v - -deploy: off diff --git a/installer/vendor/github.com/Masterminds/semver/benchmark_test.go b/installer/vendor/github.com/Masterminds/semver/benchmark_test.go deleted file mode 100644 index 5a76f6a934..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/benchmark_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package semver - -import "testing" - -func init() { - // disable constraint and version creation caching - CacheConstraints = false - CacheVersions = false -} - -var ( - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - includeMax: true, - } - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - rc3 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc4 = rangeConstraint{ - min: newV(1, 7, 0), - max: newV(4, 0, 0), - } - rc5 = rangeConstraint{ - min: newV(2, 7, 0), - max: newV(3, 0, 0), - } - rc6 = rangeConstraint{ - min: newV(3, 0, 1), - max: newV(3, 0, 4), - } - rc7 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(1, 2, 0), - } - // Two fully non-overlapping unions - u1 = rc1.Union(rc7) - u2 = rc5.Union(rc6) -) - -/* Constraint creation benchmarks */ - -func benchNewConstraint(c string, b *testing.B) { - for i := 0; i < b.N; i++ { - NewConstraint(c) - } -} - -func BenchmarkNewConstraintUnary(b *testing.B) { - benchNewConstraint("=2.0", b) -} - -func BenchmarkNewConstraintTilde(b *testing.B) { - benchNewConstraint("~2.0.0", b) -} - -func BenchmarkNewConstraintCaret(b *testing.B) { - benchNewConstraint("^2.0.0", b) -} - -func BenchmarkNewConstraintWildcard(b *testing.B) { - benchNewConstraint("1.x", b) -} - -func BenchmarkNewConstraintRange(b *testing.B) { - benchNewConstraint(">=2.1.x, <3.1.0", b) -} - -func BenchmarkNewConstraintUnion(b *testing.B) { - benchNewConstraint("~2.0.0 || =3.1.0", b) -} - -/* Validate benchmarks, including fails */ - -func benchValidateVersion(c, v string, b *testing.B) { - version, _ := NewVersion(v) - constraint, _ := NewConstraint(c) - - for i := 0; i < b.N; i++ { - constraint.Matches(version) - } -} - -func BenchmarkValidateVersionUnary(b *testing.B) { - benchValidateVersion("=2.0", "2.0.0", b) -} - -func BenchmarkValidateVersionUnaryFail(b *testing.B) { - benchValidateVersion("=2.0", "2.0.1", b) -} - -func BenchmarkValidateVersionTilde(b *testing.B) { - benchValidateVersion("~2.0.0", "2.0.5", b) -} - -func BenchmarkValidateVersionTildeFail(b *testing.B) { - benchValidateVersion("~2.0.0", "1.0.5", b) -} - -func BenchmarkValidateVersionCaret(b *testing.B) { - benchValidateVersion("^2.0.0", "2.1.0", b) -} - -func BenchmarkValidateVersionCaretFail(b *testing.B) { - benchValidateVersion("^2.0.0", "4.1.0", b) -} - -func BenchmarkValidateVersionWildcard(b *testing.B) { - benchValidateVersion("1.x", "1.4.0", b) -} - -func BenchmarkValidateVersionWildcardFail(b *testing.B) { - benchValidateVersion("1.x", "2.4.0", b) -} - -func BenchmarkValidateVersionRange(b *testing.B) { - benchValidateVersion(">=2.1.x, <3.1.0", "2.4.5", b) -} - -func BenchmarkValidateVersionRangeFail(b *testing.B) { - benchValidateVersion(">=2.1.x, <3.1.0", "1.4.5", b) -} - -func BenchmarkValidateVersionUnion(b *testing.B) { - benchValidateVersion("~2.0.0 || =3.1.0", "3.1.0", b) -} - -func BenchmarkValidateVersionUnionFail(b *testing.B) { - benchValidateVersion("~2.0.0 || =3.1.0", "3.1.1", b) -} - -/* Version creation benchmarks */ - -func benchNewVersion(v string, b *testing.B) { - for i := 0; i < b.N; i++ { - NewVersion(v) - } -} - -func BenchmarkNewVersionSimple(b *testing.B) { - 
benchNewVersion("1.0.0", b) -} - -func BenchmarkNewVersionPre(b *testing.B) { - benchNewVersion("1.0.0-alpha", b) -} - -func BenchmarkNewVersionMeta(b *testing.B) { - benchNewVersion("1.0.0+metadata", b) -} - -func BenchmarkNewVersionMetaDash(b *testing.B) { - benchNewVersion("1.0.0+metadata-dash", b) -} - -/* Union benchmarks */ - -func BenchmarkAdjacentRangeUnion(b *testing.B) { - for i := 0; i < b.N; i++ { - Union(rc1, rc2) - } -} - -func BenchmarkAdjacentRangeUnionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc1.Union(rc2) - } -} - -func BenchmarkDisjointRangeUnion(b *testing.B) { - for i := 0; i < b.N; i++ { - Union(rc2, rc3) - } -} - -func BenchmarkDisjointRangeUnionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc2.Union(rc3) - } -} - -func BenchmarkOverlappingRangeUnion(b *testing.B) { - for i := 0; i < b.N; i++ { - Union(rc1, rc4) - } -} - -func BenchmarkOverlappingRangeUnionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc1.Union(rc4) - } -} - -func BenchmarkUnionUnion(b *testing.B) { - for i := 0; i < b.N; i++ { - Union(u1, u2) - } -} - -func BenchmarkUnionUnionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - u1.Union(u2) - } -} - -/* Intersection benchmarks */ - -func BenchmarkSubsetRangeIntersection(b *testing.B) { - for i := 0; i < b.N; i++ { - Intersection(rc2, rc4) - } -} - -func BenchmarkSubsetRangeIntersectionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc2.Intersect(rc4) - } -} - -func BenchmarkDisjointRangeIntersection(b *testing.B) { - for i := 0; i < b.N; i++ { - Intersection(rc2, rc3) - } -} - -func BenchmarkDisjointRangeIntersectionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc2.Intersect(rc3) - } -} - -func BenchmarkOverlappingRangeIntersection(b *testing.B) { - for i := 0; i < b.N; i++ { - Intersection(rc1, rc4) - } -} - -func BenchmarkOverlappingRangeIntersectionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - rc1.Intersect(rc4) - } -} - -func BenchmarkUnionIntersection(b *testing.B) { - for i := 0; i < b.N; i++ { - Intersection(u1, u2) - } -} - -func BenchmarkUnionIntersectionMethod(b *testing.B) { - for i := 0; i < b.N; i++ { - u1.Intersect(u2) - } -} diff --git a/installer/vendor/github.com/Masterminds/semver/collection_test.go b/installer/vendor/github.com/Masterminds/semver/collection_test.go deleted file mode 100644 index a1d745f476..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/collection_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package semver - -import ( - "reflect" - "sort" - "testing" -) - -func TestCollection(t *testing.T) { - raw := []string{ - "1.2.3", - "1.0", - "1.3", - "2", - "0.4.2", - } - - vs := make([]Version, len(raw)) - for i, r := range raw { - v, err := NewVersion(r) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - vs[i] = v - } - - sort.Sort(Collection(vs)) - - e := []string{ - "0.4.2", - "1.0.0", - "1.2.3", - "1.3.0", - "2.0.0", - } - - a := make([]string, len(vs)) - for i, v := range vs { - a[i] = v.String() - } - - if !reflect.DeepEqual(a, e) { - t.Error("Sorting Collection failed") - } -} diff --git a/installer/vendor/github.com/Masterminds/semver/constraints_test.go b/installer/vendor/github.com/Masterminds/semver/constraints_test.go deleted file mode 100644 index cb77c89d61..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/constraints_test.go +++ /dev/null @@ -1,712 +0,0 @@ -package semver - -import "testing" - -func TestParseConstraint(t *testing.T) { - tests := []struct { - in string - c Constraint - err bool 
- }{ - {"*", Any(), false}, - {">= 1.2", rangeConstraint{ - min: newV(1, 2, 0), - max: Version{special: infiniteVersion}, - includeMin: true, - }, false}, - {"1.0", newV(1, 0, 0), false}, - {"foo", nil, true}, - {"<= 1.2", rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(1, 2, 0), - includeMax: true, - }, false}, - {"=< 1.2", rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(1, 2, 0), - includeMax: true, - }, false}, - {"=> 1.2", rangeConstraint{ - min: newV(1, 2, 0), - max: Version{special: infiniteVersion}, - includeMin: true, - }, false}, - {"v1.2", newV(1, 2, 0), false}, - {"=1.5", newV(1, 5, 0), false}, - {"> 1.3", rangeConstraint{ - min: newV(1, 3, 0), - max: Version{special: infiniteVersion}, - }, false}, - {"< 1.4.1", rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(1, 4, 1), - }, false}, - {"~1.1.0", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(1, 2, 0), - includeMin: true, - includeMax: false, - }, false}, - {"^1.1.0", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(2, 0, 0), - includeMin: true, - includeMax: false, - }, false}, - {"^1.1.0-12-abc123", rangeConstraint{ - min: Version{major: 1, minor: 1, patch: 0, pre: "12-abc123"}, - max: newV(2, 0, 0), - includeMin: true, - includeMax: false, - }, false}, - } - - for _, tc := range tests { - c, err := parseConstraint(tc.in, false) - if tc.err && err == nil { - t.Errorf("Expected error for %s didn't occur", tc.in) - } else if !tc.err && err != nil { - t.Errorf("Unexpected error %q for %s", err, tc.in) - } - - // If an error was expected continue the loop and don't try the other - // tests as they will cause errors. - if tc.err { - continue - } - - if !constraintEq(tc.c, c) { - t.Errorf("%q produced constraint %q, but expected %q", tc.in, c, tc.c) - } - } -} - -func constraintEq(c1, c2 Constraint) bool { - switch tc1 := c1.(type) { - case any: - if _, ok := c2.(any); !ok { - return false - } - return true - case none: - if _, ok := c2.(none); !ok { - return false - } - return true - case Version: - if tc2, ok := c2.(Version); ok { - return tc1.Equal(tc2) - } - return false - case rangeConstraint: - if tc2, ok := c2.(rangeConstraint); ok { - if len(tc1.excl) != len(tc2.excl) { - return false - } - - if !tc1.minIsZero() { - if !(tc1.includeMin == tc2.includeMin && tc1.min.Equal(tc2.min)) { - return false - } - } else if !tc2.minIsZero() { - return false - } - - if !tc1.maxIsInf() { - if !(tc1.includeMax == tc2.includeMax && tc1.max.Equal(tc2.max)) { - return false - } - } else if !tc2.maxIsInf() { - return false - } - - for k, e := range tc1.excl { - if !e.Equal(tc2.excl[k]) { - return false - } - } - return true - } - return false - case unionConstraint: - if tc2, ok := c2.(unionConstraint); ok { - if len(tc1) != len(tc2) { - return false - } - - for k, c := range tc1 { - if !constraintEq(c, tc2[k]) { - return false - } - } - return true - } - return false - } - - panic("unknown type") -} - -// newV is a helper to create a new Version object. 
-func newV(major, minor, patch uint64) Version { - return Version{ - major: major, - minor: minor, - patch: patch, - } -} - -func TestConstraintCheck(t *testing.T) { - tests := []struct { - constraint string - version string - check bool - }{ - {"= 2.0", "1.2.3", false}, - {"= 2.0", "2.0.0", true}, - {"4.1", "4.1.0", true}, - {"!=4.1", "4.1.0", false}, - {"!=4.1", "5.1.0", true}, - {">1.1", "4.1.0", true}, - {">1.1", "1.1.0", false}, - {"<1.1", "0.1.0", true}, - {"<1.1", "1.1.0", false}, - {"<1.1", "1.1.1", false}, - {">=1.1", "4.1.0", true}, - {">=1.1", "1.1.0", true}, - {">=1.1", "0.0.9", false}, - {"<=1.1", "0.1.0", true}, - {"<=1.1", "1.1.0", true}, - {"<=1.1", "1.1.1", false}, - {"<=1.1-alpha1", "1.1", false}, - {"<=2.x", "3.0.0", false}, - {"<=2.x", "2.9.9", true}, - {"<2.x", "2.0.0", false}, - {"<2.x", "1.9.9", true}, - {">=2.x", "3.0.0", true}, - {">=2.x", "2.9.9", true}, - {">=2.x", "1.9.9", false}, - {">2.x", "3.0.0", true}, - {">2.x", "2.9.9", false}, - {">2.x", "1.9.9", false}, - {"<=2.x-alpha2", "3.0.0-alpha3", false}, - {"<=2.0.0", "2.0.0-alpha1", false}, - {">2.x-beta1", "3.0.0-alpha2", false}, - {"^2.0.0", "3.0.0-alpha2", false}, - {"^2.0.0", "2.0.0-alpha1", false}, - {"^2.1.0-alpha1", "2.1.0-alpha2", true}, // allow prerelease match within same major/minor/patch - {"^2.1.0-alpha1", "2.1.1-alpha2", false}, // but ONLY within same major/minor/patch - {"^2.1.0-alpha3", "2.1.0-alpha2", false}, // still respect prerelease ordering - {"^2.0.0", "2.0.0-alpha2", false}, // and only if the min has a prerelease - } - - for _, tc := range tests { - if testing.Verbose() { - t.Logf("Testing if %q allows %q", tc.constraint, tc.version) - } - c, err := parseConstraint(tc.constraint, false) - if err != nil { - t.Errorf("err: %s", err) - continue - } - - v, err := NewVersion(tc.version) - if err != nil { - t.Errorf("err: %s", err) - continue - } - - a := c.Matches(v) == nil - if a != tc.check { - if tc.check { - t.Errorf("%q should have matched %q", tc.constraint, tc.version) - } else { - t.Errorf("%q should not have matched %q", tc.constraint, tc.version) - } - } - } -} - -func TestNewConstraint(t *testing.T) { - tests := []struct { - input string - c Constraint - err bool - }{ - {">= 1.1", rangeConstraint{ - min: newV(1, 1, 0), - max: Version{special: infiniteVersion}, - includeMin: true, - }, false}, - {"2.0", newV(2, 0, 0), false}, - {">= bar", nil, true}, - {"^1.1.0", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(2, 0, 0), - includeMin: true, - }, false}, - {">= 1.2.3, < 2.0 || => 3.0, < 4", unionConstraint{ - rangeConstraint{ - min: newV(1, 2, 3), - max: newV(2, 0, 0), - includeMin: true, - }, - rangeConstraint{ - min: newV(3, 0, 0), - max: newV(4, 0, 0), - includeMin: true, - }, - }, false}, - {"3-4 || => 1.0, < 2", Union( - rangeConstraint{ - min: newV(3, 0, 0), - max: newV(4, 0, 0), - includeMin: true, - includeMax: true, - }, - rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - includeMin: true, - }, - ), false}, - // demonstrates union compression - {"3-4 || => 3.0, < 4", rangeConstraint{ - min: newV(3, 0, 0), - max: newV(4, 0, 0), - includeMin: true, - includeMax: true, - }, false}, - {">=1.1.0, <2.0.0", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(2, 0, 0), - includeMin: true, - includeMax: false, - }, false}, - {"!=1.4.0", rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 4, 0), - }, - }, false}, - {">=1.1.0, !=1.4.0", rangeConstraint{ - min: newV(1, 1, 0), - max: 
Version{special: infiniteVersion}, - includeMin: true, - excl: []Version{ - newV(1, 4, 0), - }, - }, false}, - } - - for _, tc := range tests { - c, err := NewConstraint(tc.input) - if tc.err && err == nil { - t.Errorf("expected but did not get error for: %s", tc.input) - continue - } else if !tc.err && err != nil { - t.Errorf("unexpectederror for input %s: %s", tc.input, err) - continue - } - if tc.err { - continue - } - - if !constraintEq(tc.c, c) { - t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c) - } - } -} - -func TestNewConstraintIC(t *testing.T) { - tests := []struct { - input string - c Constraint - err bool - }{ - {"=2.0", newV(2, 0, 0), false}, - {"= 2.0", newV(2, 0, 0), false}, - {"1.1.0", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(2, 0, 0), - includeMin: true, - }, false}, - {"1.1", rangeConstraint{ - min: newV(1, 1, 0), - max: newV(2, 0, 0), - includeMin: true, - }, false}, - {"v1.1.0-12-abc123", rangeConstraint{ - min: Version{major: 1, minor: 1, patch: 0, pre: "12-abc123"}, - max: newV(2, 0, 0), - includeMin: true, - includeMax: false, - }, false}, - } - - for _, tc := range tests { - c, err := NewConstraintIC(tc.input) - if tc.err && err == nil { - t.Errorf("expected but did not get error for: %s", tc.input) - continue - } else if !tc.err && err != nil { - t.Errorf("unexpectederror for input %s: %s", tc.input, err) - continue - } - if tc.err { - continue - } - - if !constraintEq(tc.c, c) { - t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c) - } - } -} - -func TestConstraintsCheck(t *testing.T) { - tests := []struct { - constraint string - version string - check bool - }{ - {"*", "1.2.3", true}, - {"~0.0.0", "1.2.3", false}, - {"0.x.x", "1.2.3", false}, - {"0.0.x", "1.2.3", false}, - {"~0.0.0", "0.1.9", false}, - {"~0.0.0", "0.0.9", true}, - {"^0.0.0", "0.0.9", true}, - {"^0.0.0", "0.1.9", false}, // caret behaves like tilde below 1.0.0 - {"= 2.0", "1.2.3", false}, - {"= 2.0", "2.0.0", true}, - {"4.1", "4.1.0", true}, - {"4.1.x", "4.1.3", true}, - {"1.x", "1.4", true}, - {"!=4.1", "4.1.0", false}, - {"!=4.1", "5.1.0", true}, - {"!=4.x", "5.1.0", true}, - {"!=4.x", "4.1.0", false}, - {"!=4.1.x", "4.2.0", true}, - {"!=4.2.x", "4.2.3", false}, - {">1.1", "4.1.0", true}, - {">1.1", "1.1.0", false}, - {"<1.1", "0.1.0", true}, - {"<1.1", "1.1.0", false}, - {"<1.1", "1.1.1", false}, - {"<1.x", "1.1.1", false}, - {"<1.x", "0.9.1", true}, - {"<1.x", "2.1.1", false}, - {"<1.1.x", "1.2.1", false}, - {"<1.1.x", "1.1.500", false}, - {"<1.1.x", "1.0.500", true}, - {"<1.2.x", "1.1.1", true}, - {">=1.1", "4.1.0", true}, - {">=1.1", "1.1.0", true}, - {">=1.1", "0.0.9", false}, - {"<=1.1", "0.1.0", true}, - {"<=1.1", "1.1.0", true}, - {"<=1.x", "1.1.0", true}, - {"<=2.x", "3.1.0", false}, - {"<=1.1", "1.1.1", false}, - {"<=1.1.x", "1.2.500", false}, - {">1.1, <2", "1.1.1", true}, - {">1.1, <3", "4.3.2", false}, - {">=1.1, <2, !=1.2.3", "1.2.3", false}, - {">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true}, - {">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true}, - {">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false}, - {">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false}, - {"1.1 - 2", "1.1.1", true}, - {"1.1-3", "4.3.2", false}, - {"^1.1", "1.1.1", true}, - {"^1.1", "4.3.2", false}, - {"^1.x", "1.1.1", true}, - {"^2.x", "1.1.1", false}, - {"^1.x", "2.1.1", false}, - {"~*", "2.1.1", true}, - {"~1.x", "2.1.1", false}, - {"~1.x", "1.3.5", true}, - {"~1.x", "1.4", true}, - {"~1.1", "1.1.1", true}, - {"~1.2.3", "1.2.5", true}, - {"~1.2.3", "1.2.2", false}, - 
{"~1.2.3", "1.3.2", false}, - {"~1.1", "1.2.3", false}, - {"~1.3", "2.4.5", false}, - } - - for _, tc := range tests { - c, err := NewConstraint(tc.constraint) - if err != nil { - t.Errorf("err: %s", err) - continue - } - - v, err := NewVersion(tc.version) - if err != nil { - t.Errorf("err: %s", err) - continue - } - - a := c.Matches(v) == nil - if a != tc.check { - if a { - t.Errorf("Input %q produced constraint %q; should not have admitted %q, but did", tc.constraint, c, tc.version) - } else { - t.Errorf("Input %q produced constraint %q; should have admitted %q, but did not", tc.constraint, c, tc.version) - } - } - } -} - -func TestBidirectionalSerialization(t *testing.T) { - tests := []struct { - io string - eq bool - }{ - {"*", true}, // any - {"~0.0.0", false}, // tildes expand into ranges - {"=2.0", false}, // abbreviated versions print as full - {"4.1.x", false}, // wildcards expand into ranges - {">= 1.1.0", false}, // does not produce spaces on ranges - {"4.1.0", true}, - {"!=4.1.0", true}, - {">=1.1.0", true}, - {">1.0.0, <=1.1.0", true}, - {"<=1.1.0", true}, - {">=1.1.7, <1.3.0", true}, // tilde width - {">=1.1.0, <=2.0.0", true}, // no unary op on lte max - {">1.1.3, <2.0.0", true}, // no unary op on gt min - {">1.1.0, <=2.0.0", true}, // no unary op on gt min and lte max - {">=1.1.0, <=1.2.0", true}, // no unary op on lte max - {">1.1.1, <1.2.0", true}, // no unary op on gt min - {">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max - {">1.1.7, <=2.0.0", true}, // no unary op on gt min and lte max - {">=0.1.7, <1.0.0", true}, // caret shifting below 1.0.0 - {">=0.1.7, <0.3.0", true}, // caret shifting width below 1.0.0 - } - - for _, fix := range tests { - c, err := NewConstraint(fix.io) - if err != nil { - t.Errorf("Valid constraint string produced unexpected error: %s", err) - } - - eq := fix.io == c.String() - if eq != fix.eq { - if eq { - t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io) - } else { - t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c) - } - } - } -} - -func TestBidirectionalSerializationIC(t *testing.T) { - tests := []struct { - io string - eq bool - }{ - {"*", true}, // any - {"=2.0.0", true}, // versions retain leading = - {"2.0.0", true}, // (no) caret in, (no) caret out - } - - for _, fix := range tests { - c, err := NewConstraintIC(fix.io) - if err != nil { - t.Errorf("Valid constraint string produced unexpected error: %s", err) - } - - eq := fix.io == c.ImpliedCaretString() - if eq != fix.eq { - if eq { - t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io) - } else { - t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c) - } - } - } -} - -func TestPreferUnaryOpForm(t *testing.T) { - tests := []struct { - in, out string - }{ - {">=0.1.7, <0.2.0", "^0.1.7"}, // caret shifting below 1.0.0 - {">=1.1.0, <2.0.0", "^1.1.0"}, - {">=1.1.0, <2.0.0, !=1.2.3", "^1.1.0, !=1.2.3"}, - } - - for _, fix := range tests { - c, err := NewConstraint(fix.in) - if err != nil { - t.Errorf("Valid constraint string produced unexpected error: %s", err) - } - - if fix.out != c.String() { - t.Errorf("Constraint %q was not transformed into expected output string %q", fix.in, fix.out) - } - } -} - -func TestRewriteRange(t *testing.T) { - tests := []struct { - c string - nc string - }{ - {"2-3", ">= 2, <= 3"}, - {"2-3, 2-3", ">= 2, <= 3,>= 2, <= 3"}, - {"2-3, 4.0.0-5.1", ">= 2, <= 3,>= 4.0.0, <= 5.1"}, - 
{"v2-3, 2-3", "v2-3,>= 2, <= 3"}, - } - - for _, tc := range tests { - o := rewriteRange(tc.c) - - if o != tc.nc { - t.Errorf("Range %s rewritten incorrectly as '%s'", tc.c, o) - } - } -} - -func TestIsX(t *testing.T) { - tests := []struct { - t string - c bool - }{ - {"A", false}, - {"%", false}, - {"X", true}, - {"x", true}, - {"*", true}, - } - - for _, tc := range tests { - a := isX(tc.t) - if a != tc.c { - t.Errorf("Function isX error on %s", tc.t) - } - } -} - -func TestUnionErr(t *testing.T) { - u1 := Union( - rangeConstraint{ - min: newV(3, 0, 0), - max: newV(4, 0, 0), - includeMin: true, - includeMax: true, - }, - rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - includeMin: true, - }, - ) - fail := u1.Matches(newV(2, 5, 0)) - failstr := `2.5.0 is greater than or equal to the maximum of ^1.0.0 -2.5.0 is less than the minimum of >=3.0.0, <=4.0.0` - if fail.Error() != failstr { - t.Errorf("Did not get expected failure message from union, got %q", fail) - } -} - -func TestIsSuperset(t *testing.T) { - rc := []rangeConstraint{ - { - min: newV(1, 2, 0), - max: newV(2, 0, 0), - includeMin: true, - }, - { - min: newV(1, 2, 0), - max: newV(2, 1, 0), - }, - { - min: Version{special: zeroVersion}, - max: newV(1, 10, 0), - }, - { - min: newV(2, 0, 0), - max: Version{special: infiniteVersion}, - }, - { - min: newV(1, 2, 0), - max: newV(2, 0, 0), - includeMax: true, - }, - } - - for _, c := range rc { - - // Superset comparison is not strict, so a range should always be a superset - // of itself. - if !c.isSupersetOf(c) { - t.Errorf("Ranges should be supersets of themselves; %s indicated it was not", c) - } - } - - pairs := []struct{ l, r rangeConstraint }{ - { - // ensures lte is handled correctly (min side) - l: rc[0], - r: rc[1], - }, - { - // ensures nil on min side works well - l: rc[0], - r: rc[2], - }, - { - // ensures nil on max side works well - l: rc[0], - r: rc[3], - }, - { - // ensures nils on both sides work well - l: rc[2], - r: rc[3], - }, - { - // ensures gte is handled correctly (max side) - l: rc[2], - r: rc[4], - }, - } - - for _, p := range pairs { - if p.l.isSupersetOf(p.r) { - t.Errorf("%s is not a superset of %s", p.l, p.r) - } - if p.r.isSupersetOf(p.l) { - t.Errorf("%s is not a superset of %s", p.r, p.l) - } - } - - rc[1].max.minor = 0 - - if !rc[0].isSupersetOf(rc[1]) { - t.Errorf("%s is a superset of %s", rc[0], rc[1]) - } - rc[1].includeMax = true - if rc[1].isSupersetOf(rc[0]) { - t.Errorf("%s is not a superset of %s", rc[1], rc[0]) - } - rc[0].includeMin = false - if !rc[1].isSupersetOf(rc[0]) { - t.Errorf("%s is a superset of %s", rc[1], rc[0]) - } - - // isSupersetOf ignores excludes, so even though this would make rc[1] not a - // superset of rc[0] anymore, it should still say it is. 
- rc[1].excl = []Version{ - newV(1, 5, 0), - } - - if !rc[1].isSupersetOf(rc[0]) { - t.Errorf("%s is still a superset of %s, because isSupersetOf is supposed to ignore excluded versions", rc[1], rc[0]) - } -} diff --git a/installer/vendor/github.com/Masterminds/semver/set_ops_test.go b/installer/vendor/github.com/Masterminds/semver/set_ops_test.go deleted file mode 100644 index c08f27618d..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/set_ops_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package semver - -import "testing" - -func TestIntersection(t *testing.T) { - var actual Constraint - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - - if actual = Intersection(); !IsNone(actual) { - t.Errorf("Intersection of nothing should always produce None; got %q", actual) - } - - if actual = Intersection(rc1); !constraintEq(actual, rc1) { - t.Errorf("Intersection of one item should always return that item; got %q", actual) - } - - if actual = Intersection(rc1, None()); !IsNone(actual) { - t.Errorf("Intersection of anything with None should always produce None; got %q", actual) - } - - if actual = Intersection(rc1, Any()); !constraintEq(actual, rc1) { - t.Errorf("Intersection of anything with Any should return self; got %q", actual) - } - - v1 := newV(1, 5, 0) - if actual = Intersection(rc1, v1); !constraintEq(actual, v1) { - t.Errorf("Got constraint %q, but expected %q", actual, v1) - } - - rc2 := rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 2, 0), - } - result := rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 0, 0), - } - - if actual = Intersection(rc1, rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - u1 := unionConstraint{ - rangeConstraint{ - min: newV(1, 2, 0), - max: newV(3, 0, 0), - }, - newV(3, 1, 0), - } - - if actual = Intersection(u1, rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = Intersection(rc1, newV(2, 0, 5), u1); !IsNone(actual) { - t.Errorf("First two are disjoint, should have gotten None but got %q", actual) - } -} - -func TestRangeIntersection(t *testing.T) { - var actual Constraint - // Test magic cases - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) { - t.Errorf("Intersection of anything with Any should return self; got %q", actual) - } - if actual = rc1.Intersect(None()); !IsNone(actual) { - t.Errorf("Intersection of anything with None should always produce None; got %q", actual) - } - - // Test single version cases - - // single v, in range - v1 := newV(1, 5, 0) - - if actual = rc1.Intersect(v1); !constraintEq(actual, v1) { - t.Errorf("Intersection of version with matching range should return the version; got %q", actual) - } - - // now exclude just that version - rc1.excl = []Version{v1} - if actual = rc1.Intersect(v1); !IsNone(actual) { - t.Errorf("Intersection of version with range having specific exclude for that version should produce None; got %q", actual) - } - - // and, of course, none if the version is out of range - v2 := newV(0, 5, 0) - if actual = rc1.Intersect(v2); !IsNone(actual) { - t.Errorf("Intersection of version with non-matching range should produce None; got %q", actual) - } - - // Test basic overlap case - rc1 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - rc2 := rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 2, 0), - } - result := 
rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // And with includes - rc1.includeMin = true - rc1.includeMax = true - rc2.includeMin = true - rc2.includeMax = true - result.includeMin = true - result.includeMax = true - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Overlaps with nils - rc1 = rangeConstraint{ - min: newV(1, 0, 0), - max: Version{special: infiniteVersion}, - } - rc2 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(2, 2, 0), - } - result = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 2, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // And with includes - rc1.includeMin = true - rc2.includeMax = true - result.includeMin = true - result.includeMax = true - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Test superset overlap case - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - } - result = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Make sure irrelevant includes don't leak in - rc2.includeMin = true - rc2.includeMax = true - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // But relevant includes get used - rc1.includeMin = true - rc1.includeMax = true - result.includeMin = true - result.includeMax = true - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Test disjoint case - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(1, 6, 0), - } - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) { - t.Errorf("Got constraint %q, but expected %q", actual, None()) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) { - t.Errorf("Got constraint %q, but expected %q", actual, None()) - } - - // Test disjoint at gt/lt boundary (non-adjacent) - rc1 
= rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) { - t.Errorf("Got constraint %q, but expected %q", actual, None()) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) { - t.Errorf("Got constraint %q, but expected %q", actual, None()) - } - - // Now, just have them touch at a single version - rc1.includeMax = true - rc2.includeMin = true - - vresult := newV(2, 0, 0) - if actual = rc1.Intersect(rc2); !constraintEq(actual, vresult) { - t.Errorf("Got constraint %q, but expected %q", actual, vresult) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, vresult) { - t.Errorf("Got constraint %q, but expected %q", actual, vresult) - } - - // Test excludes in intersection range - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - excl: []Version{ - newV(1, 6, 0), - }, - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - - // Test excludes not in intersection range - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - excl: []Version{ - newV(1, 1, 0), - }, - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - - // Test min, and greater min - rc1 = rangeConstraint{ - min: newV(1, 0, 0), - max: Version{special: infiniteVersion}, - } - rc2 = rangeConstraint{ - min: newV(1, 5, 0), - max: Version{special: infiniteVersion}, - includeMin: true, - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Test max, and lesser max - rc1 = rangeConstraint{ - max: newV(1, 0, 0), - } - rc2 = rangeConstraint{ - max: newV(1, 5, 0), - } - result = rangeConstraint{ - max: newV(1, 0, 0), - } - - if actual = rc1.Intersect(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Intersect(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Ensure pure excludes come through as they should - rc1 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 6, 0), - }, - } - - rc2 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 6, 0), - newV(1, 7, 0), - }, - } - - if actual = Any().Intersect(rc1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - - // TODO test the 
pre-release special range stuff -} - -func TestRangeUnion(t *testing.T) { - var actual Constraint - // Test magic cases - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - if actual = rc1.Union(Any()); !IsAny(actual) { - t.Errorf("Union of anything with Any should always produce Any; got %q", actual) - } - if actual = rc1.Union(None()); !constraintEq(actual, rc1) { - t.Errorf("Union of anything with None should return self; got %q", actual) - } - - // Test single version cases - - // single v, in range - v1 := newV(1, 5, 0) - - if actual = rc1.Union(v1); !constraintEq(actual, rc1) { - t.Errorf("Union of version with matching range should return the range; got %q", actual) - } - - // now exclude just that version - rc2 := rc1.dup() - rc2.excl = []Version{v1} - if actual = rc2.Union(v1); !constraintEq(actual, rc1) { - t.Errorf("Union of version with range having specific exclude for that version should produce the range without that exclude; got %q", actual) - } - - // and a union if the version is not within the range - v2 := newV(0, 5, 0) - uresult := unionConstraint{v2, rc1} - if actual = rc1.Union(v2); !constraintEq(actual, uresult) { - t.Errorf("Union of version with non-matching range should produce a unionConstraint with those two; got %q", actual) - } - - // union with version at the min should ensure "oreq" - v2 = newV(1, 0, 0) - rc3 := rc1 - rc3.includeMin = true - - if actual = rc1.Union(v2); !constraintEq(actual, rc3) { - t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual) - } - if actual = v2.Union(rc1); !constraintEq(actual, rc3) { - t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual) - } - - // same at max end - v2 = newV(2, 0, 0) - rc3.includeMin = false - rc3.includeMax = true - - if actual = rc1.Union(v2); !constraintEq(actual, rc3) { - t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual) - } - if actual = v2.Union(rc1); !constraintEq(actual, rc3) { - t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual) - } - - // Test basic overlap case - rc1 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 2, 0), - } - result := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 2, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // And with includes - rc1.includeMin = true - rc1.includeMax = true - rc2.includeMin = true - rc2.includeMax = true - result.includeMin = true - result.includeMax = true - - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Overlaps with nils - rc1 = rangeConstraint{ - min: newV(1, 0, 0), - max: Version{special: infiniteVersion}, - } - rc2 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(2, 2, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, Any()) { - t.Errorf("Got constraint %q, but expected %q", actual, Any()) - } - if actual = rc2.Union(rc1); !constraintEq(actual, Any()) { - 
t.Errorf("Got constraint %q, but expected %q", actual, Any()) - } - - // Just one nil in overlap - rc1.max = newV(2, 0, 0) - result = rangeConstraint{ - min: Version{special: zeroVersion}, - max: newV(2, 2, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - rc1.max = Version{special: infiniteVersion} - rc2.min = newV(1, 5, 0) - result = rangeConstraint{ - min: newV(1, 0, 0), - max: Version{special: infiniteVersion}, - } - - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Test superset overlap case - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - if actual = rc2.Union(rc1); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - - // Test disjoint case - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(1, 6, 0), - } - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - uresult = unionConstraint{rc1, rc2} - - if actual = rc1.Union(rc2); !constraintEq(actual, uresult) { - t.Errorf("Got constraint %q, but expected %q", actual, uresult) - } - if actual = rc2.Union(rc1); !constraintEq(actual, uresult) { - t.Errorf("Got constraint %q, but expected %q", actual, uresult) - } - - // Test disjoint at gt/lt boundary (non-adjacent) - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - uresult = unionConstraint{rc1, rc2} - - if actual = rc1.Union(rc2); !constraintEq(actual, uresult) { - t.Errorf("Got constraint %q, but expected %q", actual, uresult) - } - if actual = rc2.Union(rc1); !constraintEq(actual, uresult) { - t.Errorf("Got constraint %q, but expected %q", actual, uresult) - } - - // Now, just have them touch at a single version - rc1.includeMax = true - rc2.includeMin = true - result = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // And top-adjacent at that version - rc2.includeMin = false - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - // And bottom-adjacent at that version - rc1.includeMax = false - rc2.includeMin = true - if actual = rc1.Union(rc2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = rc2.Union(rc1); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - - // Test excludes in overlapping range - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 
0, 0), - excl: []Version{ - newV(1, 6, 0), - }, - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - } - - if actual = rc1.Union(rc2); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - if actual = rc2.Union(rc1); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - - // Test excludes not in non-overlapping range - rc1 = rangeConstraint{ - min: newV(1, 5, 0), - max: newV(2, 0, 0), - } - rc2 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(3, 0, 0), - excl: []Version{ - newV(1, 1, 0), - }, - } - - if actual = rc1.Union(rc2); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - if actual = rc2.Union(rc1); !constraintEq(actual, rc2) { - t.Errorf("Got constraint %q, but expected %q", actual, rc2) - } - - // Ensure pure excludes come through as they should - rc1 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 6, 0), - }, - } - - rc2 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 6, 0), - newV(1, 7, 0), - }, - } - - if actual = rc1.Union(rc2); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc2.Union(rc1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - - rc1 = rangeConstraint{ - min: Version{special: zeroVersion}, - max: Version{special: infiniteVersion}, - excl: []Version{ - newV(1, 5, 0), - }, - } - - if actual = rc1.Union(rc2); !constraintEq(actual, Any()) { - t.Errorf("Got constraint %q, but expected %q", actual, Any()) - } - if actual = rc2.Union(rc1); !constraintEq(actual, Any()) { - t.Errorf("Got constraint %q, but expected %q", actual, Any()) - } - - // TODO test the pre-release special range stuff -} - -func TestUnionIntersection(t *testing.T) { - var actual Constraint - // magic first - u1 := unionConstraint{ - newV(1, 1, 0), - newV(1, 2, 0), - newV(1, 3, 0), - } - if actual = u1.Intersect(Any()); !constraintEq(actual, u1) { - t.Errorf("Intersection of anything with Any should return self; got %s", actual) - } - if actual = u1.Intersect(None()); !IsNone(actual) { - t.Errorf("Intersection of anything with None should always produce None; got %s", actual) - } - if u1.MatchesAny(None()) { - t.Errorf("Can't match any when intersected with None") - } - - // intersect of unions with single versions - v1 := newV(1, 1, 0) - if actual = u1.Intersect(v1); !constraintEq(actual, v1) { - t.Errorf("Got constraint %q, but expected %q", actual, v1) - } - if actual = v1.Intersect(u1); !constraintEq(actual, v1) { - t.Errorf("Got constraint %q, but expected %q", actual, v1) - } - - // intersect of range with union of versions - u1 = unionConstraint{ - newV(1, 1, 0), - newV(1, 2, 0), - newV(1, 3, 0), - } - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - - if actual = u1.Intersect(rc1); !constraintEq(actual, u1) { - t.Errorf("Got constraint %q, but expected %q", actual, u1) - } - if actual = rc1.Intersect(u1); !constraintEq(actual, u1) { - t.Errorf("Got constraint %q, but expected %q", actual, u1) - } - - u2 := unionConstraint{ - newV(1, 1, 0), - newV(1, 2, 0), - } - - if actual = u1.Intersect(u2); !constraintEq(actual, u2) { - t.Errorf("Got constraint %q, but expected %q", actual, u2) - } - - // Overlapping sub/supersets - rc1 = rangeConstraint{ - min: 
newV(1, 5, 0), - max: newV(1, 6, 0), - } - rc2 := rangeConstraint{ - min: newV(2, 0, 0), - max: newV(3, 0, 0), - } - rc3 = rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - rc4 := rangeConstraint{ - min: newV(2, 5, 0), - max: newV(2, 6, 0), - } - u1 = unionConstraint{rc1, rc2} - u2 = unionConstraint{rc3, rc4} - ur := unionConstraint{rc1, rc4} - - if actual = u1.Intersect(u2); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - if actual = u2.Intersect(u1); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - - // Ensure excludes carry as they should - rc1.excl = []Version{newV(1, 5, 5)} - u1 = unionConstraint{rc1, rc2} - ur = unionConstraint{rc1, rc4} - - if actual = u1.Intersect(u2); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - if actual = u2.Intersect(u1); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } -} - -func TestUnionUnion(t *testing.T) { - var actual Constraint - // magic first - u1 := unionConstraint{ - newV(1, 1, 0), - newV(1, 2, 0), - newV(1, 3, 0), - } - if actual = u1.Union(Any()); !IsAny(actual) { - t.Errorf("Union of anything with Any should always return Any; got %s", actual) - } - if actual = u1.Union(None()); !constraintEq(actual, u1) { - t.Errorf("Union of anything with None should always return self; got %s", actual) - } - - // union of uc with single versions - // already present - v1 := newV(1, 2, 0) - if actual = u1.Union(v1); !constraintEq(actual, u1) { - t.Errorf("Got constraint %q, but expected %q", actual, u1) - } - if actual = v1.Union(u1); !constraintEq(actual, u1) { - t.Errorf("Got constraint %q, but expected %q", actual, u1) - } - - // not present - v2 := newV(1, 4, 0) - ur := append(u1, v2) - if actual = u1.Union(v2); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - if actual = v2.Union(u1); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - - // union of uc with uc, all versions - u2 := unionConstraint{ - newV(1, 3, 0), - newV(1, 4, 0), - newV(1, 5, 0), - } - ur = unionConstraint{ - newV(1, 1, 0), - newV(1, 2, 0), - newV(1, 3, 0), - newV(1, 4, 0), - newV(1, 5, 0), - } - - if actual = u1.Union(u2); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - if actual = u2.Union(u1); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - - // union that should compress versions into range - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - - if actual = u1.Union(rc1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - if actual = rc1.Union(u1); !constraintEq(actual, rc1) { - t.Errorf("Got constraint %q, but expected %q", actual, rc1) - } - - rc1.max = newV(1, 4, 5) - u3 := append(u2, newV(1, 7, 0)) - ur = unionConstraint{ - rc1, - newV(1, 5, 0), - newV(1, 7, 0), - } - - if actual = u3.Union(rc1); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } - if actual = rc1.Union(u3); !constraintEq(actual, ur) { - t.Errorf("Got constraint %q, but expected %q", actual, ur) - } -} - -// Most version stuff got tested by range and/or union b/c most tests were -// repeated bidirectionally (set operations are commutative; testing in pairs -// helps us catch any situation where we fail to maintain that 
invariant) -func TestVersionSetOps(t *testing.T) { - var actual Constraint - - v1 := newV(1, 0, 0) - - if actual = v1.Intersect(v1); !constraintEq(actual, v1) { - t.Errorf("Version intersected with itself should be itself, got %q", actual) - } - if !v1.MatchesAny(v1) { - t.Errorf("MatchesAny should work with a version against itself") - } - - v2 := newV(2, 0, 0) - if actual = v1.Intersect(v2); !IsNone(actual) { - t.Errorf("Versions should only intersect with themselves, got %q", actual) - } - if v1.MatchesAny(v2) { - t.Errorf("MatchesAny should not work when combined with anything other than itself") - } - - result := unionConstraint{v1, v2} - - if actual = v1.Union(v1); !constraintEq(actual, v1) { - t.Errorf("Version union with itself should return self, got %q", actual) - } - - if actual = v1.Union(v2); !constraintEq(actual, result) { - t.Errorf("Got constraint %q, but expected %q", actual, result) - } - if actual = v1.Union(v2); !constraintEq(actual, result) { - // Duplicate just to make sure ordering works right - t.Errorf("Got constraint %q, but expected %q", actual, result) - } -} - -func TestAreAdjacent(t *testing.T) { - rc1 := rangeConstraint{ - min: newV(1, 0, 0), - max: newV(2, 0, 0), - } - rc2 := rangeConstraint{ - min: newV(1, 2, 0), - max: newV(2, 2, 0), - } - - if areAdjacent(rc1, rc2) { - t.Errorf("Ranges overlap, should not indicate as adjacent") - } - - rc2 = rangeConstraint{ - min: newV(2, 0, 0), - } - - if areAdjacent(rc1, rc2) { - t.Errorf("Ranges are non-overlapping and non-adjacent, but reported as adjacent") - } - - rc2.includeMin = true - - if !areAdjacent(rc1, rc2) { - t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent") - } - - rc1.includeMax = true - - if areAdjacent(rc1, rc2) { - t.Errorf("Ranges are overlapping at a single version, but reported as adjacent") - } - - rc2.includeMin = false - if !areAdjacent(rc1, rc2) { - t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent") - } -} diff --git a/installer/vendor/github.com/Masterminds/semver/version_test.go b/installer/vendor/github.com/Masterminds/semver/version_test.go deleted file mode 100644 index 1fae87f526..0000000000 --- a/installer/vendor/github.com/Masterminds/semver/version_test.go +++ /dev/null @@ -1,310 +0,0 @@ -package semver - -import ( - "testing" -) - -func TestNewVersion(t *testing.T) { - tests := []struct { - version string - err bool - }{ - {"1.2.3", false}, - {"v1.2.3", false}, - {"1.0", false}, - {"v1.0", false}, - {"1", false}, - {"v1", false}, - {"1.2.beta", true}, - {"v1.2.beta", true}, - {"foo", true}, - {"1.2-5", false}, - {"v1.2-5", false}, - {"1.2-beta.5", false}, - {"v1.2-beta.5", false}, - {"\n1.2", true}, - {"\nv1.2", true}, - {"1.2.0-x.Y.0+metadata", false}, - {"v1.2.0-x.Y.0+metadata", false}, - {"1.2.0-x.Y.0+metadata-width-hypen", false}, - {"v1.2.0-x.Y.0+metadata-width-hypen", false}, - {"1.2.3-rc1-with-hypen", false}, - {"v1.2.3-rc1-with-hypen", false}, - {"1.2.3.4", true}, - {"v1.2.3.4", true}, - } - - for _, tc := range tests { - _, err := NewVersion(tc.version) - if tc.err && err == nil { - t.Fatalf("expected error for version: %s", tc.version) - } else if !tc.err && err != nil { - t.Fatalf("error for version %s: %s", tc.version, err) - } - } -} - -func TestOriginal(t *testing.T) { - tests := []string{ - "1.2.3", - "v1.2.3", - "1.0", - "v1.0", - "1", - "v1", - "1.2-5", - "v1.2-5", - "1.2-beta.5", - "v1.2-beta.5", - "1.2.0-x.Y.0+metadata", - "v1.2.0-x.Y.0+metadata", - "1.2.0-x.Y.0+metadata-width-hypen", - 
"v1.2.0-x.Y.0+metadata-width-hypen", - "1.2.3-rc1-with-hypen", - "v1.2.3-rc1-with-hypen", - } - - for _, tc := range tests { - v, err := NewVersion(tc) - if err != nil { - t.Errorf("Error parsing version %s", tc) - } - - o := v.Original() - if o != tc { - t.Errorf("Error retrieving originl. Expected '%s' but got '%s'", tc, v) - } - } -} - -func TestParts(t *testing.T) { - v, err := NewVersion("1.2.3-beta.1+build.123") - if err != nil { - t.Error("Error parsing version 1.2.3-beta.1+build.123") - } - - if v.Major() != 1 { - t.Error("Major() returning wrong value") - } - if v.Minor() != 2 { - t.Error("Minor() returning wrong value") - } - if v.Patch() != 3 { - t.Error("Patch() returning wrong value") - } - if v.Prerelease() != "beta.1" { - t.Error("Prerelease() returning wrong value") - } - if v.Metadata() != "build.123" { - t.Error("Metadata() returning wrong value") - } -} - -func TestString(t *testing.T) { - tests := []struct { - version string - expected string - }{ - {"1.2.3", "1.2.3"}, - {"v1.2.3", "1.2.3"}, - {"1.0", "1.0.0"}, - {"v1.0", "1.0.0"}, - {"1", "1.0.0"}, - {"v1", "1.0.0"}, - {"1.2-5", "1.2.0-5"}, - {"v1.2-5", "1.2.0-5"}, - {"1.2-beta.5", "1.2.0-beta.5"}, - {"v1.2-beta.5", "1.2.0-beta.5"}, - {"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"}, - {"v1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"}, - {"1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"}, - {"v1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"}, - {"1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"}, - {"v1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"}, - } - - for _, tc := range tests { - v, err := NewVersion(tc.version) - if err != nil { - t.Errorf("Error parsing version %s", tc) - } - - s := v.String() - if s != tc.expected { - t.Errorf("Error generating string. Expected '%s' but got '%s'", tc.expected, s) - } - } -} - -func TestCompare(t *testing.T) { - tests := []struct { - v1 string - v2 string - expected int - }{ - {"1.2.3", "1.5.1", -1}, - {"2.2.3", "1.5.1", 1}, - {"2.2.3", "2.2.2", 1}, - {"3.2-beta", "3.2-beta", 0}, - {"1.3", "1.1.4", 1}, - {"4.2", "4.2-beta", 1}, - {"4.2-beta", "4.2", -1}, - {"4.2-alpha", "4.2-beta", -1}, - {"4.2-alpha", "4.2-alpha", 0}, - {"4.2-beta.2", "4.2-beta.1", 1}, - {"4.2-beta2", "4.2-beta1", 1}, - {"4.2-beta", "4.2-beta.2", -1}, - {"4.2-beta", "4.2-beta.foo", 1}, - {"4.2-beta.2", "4.2-beta", 1}, - {"4.2-beta.foo", "4.2-beta", -1}, - {"1.2+bar", "1.2+baz", 0}, - } - - for _, tc := range tests { - v1, err := NewVersion(tc.v1) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - v2, err := NewVersion(tc.v2) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - a := v1.Compare(v2) - e := tc.expected - if a != e { - t.Errorf( - "Comparison of '%s' and '%s' failed. Expected '%d', got '%d'", - tc.v1, tc.v2, e, a, - ) - } - } - - // One-off tests for special version comparisons - zero := Version{special: zeroVersion} - inf := Version{special: infiniteVersion} - - if zero.Compare(inf) != -1 { - t.Error("Zero version should always be less than infinite version") - } - if zero.Compare(zero) != 0 { - t.Error("Zero version should equal itself") - } - if inf.Compare(zero) != 1 { - t.Error("Infinite version should always be greater than zero version") - } - if inf.Compare(inf) != 0 { - t.Error("Infinite version should equal itself") - } - - // Need to work vs. a normal version, too. 
- v := Version{} - - if zero.Compare(v) != -1 { - t.Error("Zero version should always be less than any normal version") - } - if inf.Compare(v) != 1 { - t.Error("Infinite version should always be greater than any normal version") - } -} - -func TestLessThan(t *testing.T) { - tests := []struct { - v1 string - v2 string - expected bool - }{ - {"1.2.3", "1.5.1", true}, - {"2.2.3", "1.5.1", false}, - {"3.2-beta", "3.2-beta", false}, - } - - for _, tc := range tests { - v1, err := NewVersion(tc.v1) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - v2, err := NewVersion(tc.v2) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - a := v1.LessThan(v2) - e := tc.expected - if a != e { - t.Errorf( - "Comparison of '%s' and '%s' failed. Expected '%t', got '%t'", - tc.v1, tc.v2, e, a, - ) - } - } -} - -func TestGreaterThan(t *testing.T) { - tests := []struct { - v1 string - v2 string - expected bool - }{ - {"1.2.3", "1.5.1", false}, - {"2.2.3", "1.5.1", true}, - {"3.2-beta", "3.2-beta", false}, - } - - for _, tc := range tests { - v1, err := NewVersion(tc.v1) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - v2, err := NewVersion(tc.v2) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - a := v1.GreaterThan(v2) - e := tc.expected - if a != e { - t.Errorf( - "Comparison of '%s' and '%s' failed. Expected '%t', got '%t'", - tc.v1, tc.v2, e, a, - ) - } - } -} - -func TestEqual(t *testing.T) { - tests := []struct { - v1 string - v2 string - expected bool - }{ - {"1.2.3", "1.5.1", false}, - {"2.2.3", "1.5.1", false}, - {"3.2-beta", "3.2-beta", true}, - {"3.2-beta+foo", "3.2-beta+bar", true}, - } - - for _, tc := range tests { - v1, err := NewVersion(tc.v1) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - v2, err := NewVersion(tc.v2) - if err != nil { - t.Errorf("Error parsing version: %s", err) - } - - a := v1.Equal(v2) - e := tc.expected - if a != e { - t.Errorf( - "Comparison of '%s' and '%s' failed. Expected '%t', got '%t'", - tc.v1, tc.v2, e, a, - ) - } - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/.gitignore b/installer/vendor/github.com/Masterminds/vcs/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/installer/vendor/github.com/Masterminds/vcs/.travis.yml b/installer/vendor/github.com/Masterminds/vcs/.travis.yml deleted file mode 100644 index f54b68d6c6..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -language: go - -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master - -before_script: - - git version - - svn --version - -# Setting sudo access to false will let Travis CI use containers rather than -# VMs to run the tests. 
For more details see: -# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ -# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ -sudo: false - -script: - - make setup - - make test - -notifications: - webhooks: - urls: - - https://webhooks.gitter.im/e/06e3328629952dabe3e0 - on_success: change # options: [always|never|change] default: always - on_failure: always # options: [always|never|change] default: always - on_start: never # options: [always|never|change] default: always diff --git a/installer/vendor/github.com/Masterminds/vcs/CHANGELOG.md b/installer/vendor/github.com/Masterminds/vcs/CHANGELOG.md deleted file mode 100644 index ff0f8280b4..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/CHANGELOG.md +++ /dev/null @@ -1,170 +0,0 @@ -# Changelog - -## 1.12.0 (2017-09-11) - -### Changed - -- #79: Include the error context in the error string (thanks @guywithnose) -- #80: Bump the Go versions for Travis CI testing (thanks @AlekSi) - -## 1.11.1 (2017-04-28) - -### Fixed - -- #76: Fix submodule handling for Windows (thanks @m0j0hn) - -## 1.11.0 (2017-03-23) - -### Added - -- #65: Exposed CmdFromDir function (thanks @erizocosmico) - -### Changed - -- #69: Updated testing for Go 1.8 - -### Fixed - -- #64: Testing fatal error if bzr not installed (thanks @kevinburke) - -## 1.10.2 (2017-01-24) - -### Fixed - -- #63: Remove extra quotes in submodule export (thanks @dt) - -## 1.10.1 (2017-01-18) - -### Fixed - -- #62: Added windows testing via appveyor and fixed issues under windows. - -## 1.10.0 (2017-01-09) - -### Added - -- #60: Handle Git submodules (thanks @sdboyer) -- #61: Add gometalinter to testing - -## 1.9.0 (2016-11-18) - -### Added - -- #50: Auto-detect remotes with file:// prefix. -- #59: Testing against Go 1.7 - -### Changed - -- Removed auto-detection for Google Code as the service is deprecated -- Added auto-detection of git.openstack.org - -### Fixed - -- #53: Git not fetching tags off branch - -## 1.8.0 (2016-06-29) - -### Added - -- #43: Detect when tool (e.g., git, svn, etc) not installed -- #49: Detect access denied and not found situations - -### Changed - -- #48: Updated Go Report Gard url to new format -- Refactored SVN handling to detect when not in a top level directory -- Updating tagging to v[SemVer] structure for compatibility with other tools. - -### Fixed - -- #45: Fixed hg's update method so that it pulls from remote before updates - -## 1.7.0 (2016-05-05) - -- Adds a glide.yaml file with some limited information. -- Implements #37: Ability to export source as a directory. -- Implements #36: Get current version-ish with Current method. This returns - a branch (if on tip) or equivalent tip, a tag if on a tag, or a revision if - on an individual revision. Note, the tip of branch is VCS specific so usage - may require detecting VCS type. - -## 1.6.1 (2016-04-27) - -- Fixed #30: tags from commit should not have ^{} appended (seen in git) -- Fixed #29: isDetachedHead fails with non-english locales (git) -- Fixed #33: Access denied and not found http errors causing xml parsing errors - -## 1.6.0 (2016-04-18) - -- Issue #26: Added Init method to initialize a repo at the local location - (thanks tony). -- Issue #19: Added method to retrieve tags for a commit. -- Issue #24: Reworked errors returned from common methods. Now differing - VCS implementations return the same errors. The original VCS specific error - is available on the error. See the docs for more details. 
-- Issue #25: Export the function RunFromDir which runs VCS commands from the - root of the local directory. This is useful for those that want to build and - extend on top of the vcs package (thanks tony). -- Issue #22: Added Ping command to test if remote location is present and - accessible. - -## 1.5.1 (2016-03-23) - -- Fixing bug parsing some Git commit dates. - -## 1.5.0 (2016-03-22) - -- Add Travis CI testing for Go 1.6. -- Issue #17: Add CommitInfo method allowing for a common way to get commit - metadata from all VCS. -- Autodetect types that have git@ or hg@ users. -- Autodetect git+ssh, bzr+ssh, git, and svn+ssh scheme urls. -- On Bitbucket for ssh style URLs retrieve the type from the URL. This allows - for private repo type detection. -- Issue #14: Autodetect ssh/scp style urls (thanks chonthu). - -## 1.4.1 (2016-03-07) - -- Fixes #16: some windows situations are unable to create parent directory. - -## 1.4.0 (2016-02-15) - -- Adding support for IBM JazzHub. - -## 1.3.1 (2016-01-27) - -- Issue #12: Failed to checkout Bzr repo when parent directory didn't - exist (thanks cyrilleverrier). - -## 1.3.0 (2015-11-09) - -- Issue #9: Added Date method to get the date/time of latest commit (thanks kamilchm). - -## 1.2.0 (2015-10-29) - -- Adding IsDirty method to detect a checkout with uncommitted changes. - -## 1.1.4 (2015-10-28) - -- Fixed #8: Git IsReference not detecting branches that have not been checked - out yet. - -## 1.1.3 (2015-10-21) - -- Fixing issue where there are multiple go-import statements for go redirects - -## 1.1.2 (2015-10-20) - -- Fixes #7: hg not checking out code when Get is called - -## 1.1.1 (2015-10-20) - -- Issue #6: Allow VCS commands to be run concurrently. - -## 1.1.0 (2015-10-19) - -- #5: Added output of failed command to returned errors. - -## 1.0.0 (2015-10-06) - -- Initial release. diff --git a/installer/vendor/github.com/Masterminds/vcs/Makefile b/installer/vendor/github.com/Masterminds/vcs/Makefile deleted file mode 100644 index 5d722c2f4b..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/Makefile +++ /dev/null @@ -1,41 +0,0 @@ -.PHONY: setup -setup: - go get -u gopkg.in/alecthomas/gometalinter.v1 - gometalinter.v1 --install - -.PHONY: test -test: validate lint - @echo "==> Running tests" - go test -v - -.PHONY: validate -validate: -# misspell finds the work adresář (used in bzr.go) as a mispelling of -# address. It finds adres. An issue has been filed at -# https://github.com/client9/misspell/issues/99. In the meantime adding -# adres to the ignore list. - @echo "==> Running static validations" - @gometalinter.v1 \ - --disable-all \ - --linter "misspell:misspell -i adres -j 1 {path}/*.go:PATH:LINE:COL:MESSAGE" \ - --enable deadcode \ - --severity deadcode:error \ - --enable gofmt \ - --enable gosimple \ - --enable ineffassign \ - --enable misspell \ - --enable vet \ - --tests \ - --vendor \ - --deadline 60s \ - ./... || exit_code=1 - -.PHONY: lint -lint: - @echo "==> Running linters" - @gometalinter.v1 \ - --disable-all \ - --enable golint \ - --vendor \ - --deadline 60s \ - ./... || : diff --git a/installer/vendor/github.com/Masterminds/vcs/README.md b/installer/vendor/github.com/Masterminds/vcs/README.md deleted file mode 100644 index a11268513b..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# VCS Repository Management for Go - -Manage repos in varying version control systems with ease through a common -interface. 
- -[![Build Status](https://travis-ci.org/Masterminds/vcs.svg)](https://travis-ci.org/Masterminds/vcs) [![GoDoc](https://godoc.org/github.com/Masterminds/vcs?status.png)](https://godoc.org/github.com/Masterminds/vcs) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/vcs)](https://goreportcard.com/report/github.com/Masterminds/vcs) -[![Build status](https://ci.appveyor.com/api/projects/status/vg3cjc561q2trobm?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/vcs) - - -## Quick Usage - -Quick usage: - - remote := "https://github.com/Masterminds/vcs" - local, _ := ioutil.TempDir("", "go-vcs") - repo, err := NewRepo(remote, local) - -In this case `NewRepo` will detect the VCS is Git and return a `GitRepo`. All of -the repos implement the `Repo` interface with a common set of features between -them. - -## Supported VCS - -Git, SVN, Bazaar (Bzr), and Mercurial (Hg) are currently supported. They each -have their own type (e.g., `GitRepo`) that follow a simple naming pattern. Each -type implements the `Repo` interface and has a constructor (e.g., `NewGitRepo`). -The constructors have the same signature as `NewRepo`. - -## Features - -- Clone or checkout a repository depending on the version control system. -- Pull updates to a repository. -- Get the currently checked out commit id. -- Checkout a commit id, branch, or tag (depending on the availability in the VCS). -- Get a list of tags and branches in the VCS. -- Check if a string value is a valid reference within the VCS. -- More... - -For more details see [the documentation](https://godoc.org/github.com/Masterminds/vcs). - -## Motivation - -The package `golang.org/x/tools/go/vcs` provides some valuable functionality -for working with packages in repositories in varying source control management -systems. That package, while useful and well tested, is designed with a specific -purpose in mind. Our uses went beyond the scope of that package. To implement -our scope we built a package that went beyond the functionality and scope -of `golang.org/x/tools/go/vcs`. diff --git a/installer/vendor/github.com/Masterminds/vcs/appveyor.yml b/installer/vendor/github.com/Masterminds/vcs/appveyor.yml deleted file mode 100644 index c0c9170fa7..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/appveyor.yml +++ /dev/null @@ -1,26 +0,0 @@ - -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\Masterminds\vcs -shallow_clone: true - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -install: - - go version - - go env - - choco install -y bzr - - set PATH=C:\Program Files (x86)\Bazaar;%PATH% - - bzr --version - -build_script: - - go install -v ./... - -test_script: - - go test -v - -deploy: off diff --git a/installer/vendor/github.com/Masterminds/vcs/bzr_test.go b/installer/vendor/github.com/Masterminds/vcs/bzr_test.go deleted file mode 100644 index 4b2e50ec60..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/bzr_test.go +++ /dev/null @@ -1,328 +0,0 @@ -package vcs - -import ( - "io/ioutil" - "path/filepath" - "time" - //"log" - "os" - "testing" -) - -// Canary test to ensure BzrRepo implements the Repo interface. -var _ Repo = &BzrRepo{} - -// To verify bzr is working we perform integration testing -// with a known bzr service. Due to the long time of repeatedly checking out -// repos these tests are structured to work together. 
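For context on the common Repo interface that the removed README (Quick Usage, above) describes and that the deleted integration tests below exercise, here is a minimal illustrative sketch of that workflow. It is not part of the original diff: it assumes the external import path github.com/Masterminds/vcs, a hypothetical remote/local pair, and abbreviated error handling; the calls shown (NewRepo, Get, UpdateVersion, Version, Vcs) are the ones the tests themselves use.

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/Masterminds/vcs"
    )

    func main() {
        // Illustrative sketch only. NewRepo inspects the remote/local pair,
        // detects the backing VCS (Git here), and returns a value that
        // implements the common Repo interface.
        local, _ := ioutil.TempDir("", "go-vcs")
        repo, err := vcs.NewRepo("https://github.com/Masterminds/vcs", local)
        if err != nil {
            log.Fatal(err)
        }

        // Get clones the repository (or performs a checkout, for SVN).
        if err := repo.Get(); err != nil {
            log.Fatal(err)
        }

        // UpdateVersion checks out a commit id, branch, or tag.
        if err := repo.UpdateVersion("master"); err != nil {
            log.Fatal(err)
        }

        // Version reports the currently checked out commit id.
        v, _ := repo.Version()
        fmt.Println(repo.Vcs(), v)
    }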
- -func TestBzr(t *testing.T) { - - tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewBzrRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo") - if err != nil { - t.Fatal(err) - } - - if repo.Vcs() != Bzr { - t.Error("Bzr is detecting the wrong type") - } - - // Check the basic getters. - if repo.Remote() != "https://launchpad.net/govcstestbzrrepo" { - t.Error("Remote not set properly") - } - if repo.LocalPath() != tempDir+"/govcstestbzrrepo" { - t.Error("Local disk location not set properly") - } - - //Logger = log.New(os.Stdout, "", log.LstdFlags) - - // Do an initial clone. - err = repo.Get() - if err != nil { - t.Errorf("Unable to clone Bzr repo. Err was %s", err) - } - - // Verify Bzr repo is a Bzr repo - if !repo.CheckLocal() { - t.Error("Problem checking out repo or Bzr CheckLocal is not working") - } - - // Test internal lookup mechanism used outside of Bzr specific functionality. - ltype, err := DetectVcsFromFS(tempDir + "/govcstestbzrrepo") - if err != nil { - t.Error("detectVcsFromFS unable to Bzr repo") - } - if ltype != Bzr { - t.Errorf("detectVcsFromFS detected %s instead of Bzr type", ltype) - } - - // Test NewRepo on existing checkout. This should simply provide a working - // instance without error based on looking at the local directory. - nrepo, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo") - if nrerr != nil { - t.Error(nrerr) - } - // Verify the right oject is returned. It will check the local repo type. - if !nrepo.CheckLocal() { - t.Error("Wrong version returned from NewRepo") - } - - v, err := repo.Current() - if err != nil { - t.Errorf("Error trying Bzr Current: %s", err) - } - if v != "-1" { - t.Errorf("Current failed to detect Bzr on tip of branch. Got version: %s", v) - } - - err = repo.UpdateVersion("2") - if err != nil { - t.Errorf("Unable to update Bzr repo version. Err was %s", err) - } - - // Use Version to verify we are on the right version. - v, err = repo.Version() - if v != "2" { - t.Error("Error checking checked out Bzr version") - } - if err != nil { - t.Error(err) - } - - v, err = repo.Current() - if err != nil { - t.Errorf("Error trying Bzr Current: %s", err) - } - if v != "2" { - t.Errorf("Current failed to detect Bzr on rev 2 of branch. Got version: %s", v) - } - - // Use Date to verify we are on the right commit. - d, err := repo.Date() - if d.Format(longForm) != "2015-07-31 09:50:42 -0400" { - t.Error("Error checking checked out Bzr commit date") - } - if err != nil { - t.Error(err) - } - - // Perform an update. 
- err = repo.Update() - if err != nil { - t.Error(err) - } - - v, err = repo.Version() - if v != "3" { - t.Error("Error checking checked out Bzr version") - } - if err != nil { - t.Error(err) - } - - tags, err := repo.Tags() - if err != nil { - t.Error(err) - } - if tags[0] != "1.0.0" { - t.Error("Bzr tags is not reporting the correct version") - } - - tags, err = repo.TagsFromCommit("2") - if err != nil { - t.Error(err) - } - if len(tags) != 0 { - t.Error("Bzr is incorrectly returning tags for a commit") - } - - tags, err = repo.TagsFromCommit("3") - if err != nil { - t.Error(err) - } - if len(tags) != 1 || tags[0] != "1.0.0" { - t.Error("Bzr is incorrectly returning tags for a commit") - } - - branches, err := repo.Branches() - if err != nil { - t.Error(err) - } - if len(branches) != 0 { - t.Error("Bzr is incorrectly returning branches") - } - - if !repo.IsReference("1.0.0") { - t.Error("Bzr is reporting a reference is not one") - } - - if repo.IsReference("foo") { - t.Error("Bzr is reporting a non-existent reference is one") - } - - if repo.IsDirty() { - t.Error("Bzr incorrectly reporting dirty") - } - - ci, err := repo.CommitInfo("3") - if err != nil { - t.Error(err) - } - if ci.Commit != "3" { - t.Error("Bzr.CommitInfo wrong commit id") - } - if ci.Author != "Matt Farina " { - t.Error("Bzr.CommitInfo wrong author") - } - if ci.Message != "Updated Readme with pointer." { - t.Error("Bzr.CommitInfo wrong message") - } - ti, err := time.Parse(time.RFC1123Z, "Fri, 31 Jul 2015 09:51:37 -0400") - if err != nil { - t.Error(err) - } - if !ti.Equal(ci.Date) { - t.Error("Bzr.CommitInfo wrong date") - } - - _, err = repo.CommitInfo("asdfasdfasdf") - if err != ErrRevisionUnavailable { - t.Error("Bzr didn't return expected ErrRevisionUnavailable") - } - - tempDir2, err := ioutil.TempDir("", "go-vcs-bzr-tests-export") - if err != nil { - t.Fatalf("Error creating temp directory: %s", err) - } - defer func() { - err = os.RemoveAll(tempDir2) - if err != nil { - t.Error(err) - } - }() - - exportDir := filepath.Join(tempDir2, "src") - - err = repo.ExportDir(exportDir) - if err != nil { - t.Errorf("Unable to export Bzr repo. Err was %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, "Readme.md")) - if err != nil { - t.Errorf("Error checking exported file in Bzr: %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs()))) - if err != nil { - if found := os.IsNotExist(err); !found { - t.Errorf("Error checking exported metadata in Bzr: %s", err) - } - } else { - t.Error("Error checking Bzr metadata. It exists.") - } -} - -func TestBzrCheckLocal(t *testing.T) { - // Verify repo.CheckLocal fails for non-Bzr directories. - // TestBzr is already checking on a valid repo - tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, _ := NewBzrRepo("", tempDir) - if repo.CheckLocal() { - t.Error("Bzr CheckLocal does not identify non-Bzr location") - } - - // Test NewRepo when there's no local. This should simply provide a working - // instance without error based on looking at the remote localtion. 
- _, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo") - if nrerr != nil { - t.Error(nrerr) - } -} - -func TestBzrPing(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewBzrRepo("https://launchpad.net/govcstestbzrrepo", tempDir) - if err != nil { - t.Error(err) - } - - ping := repo.Ping() - if !ping { - t.Error("Bzr unable to ping working repo") - } - - repo, err = NewBzrRepo("https://launchpad.net/ihopethisneverexistsbecauseitshouldnt", tempDir) - if err != nil { - t.Error(err) - } - - ping = repo.Ping() - if ping { - t.Error("Bzr got a ping response from when it should not have") - } -} - -func TestBzrInit(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests") - repoDir := tempDir + "/repo" - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewBzrRepo(repoDir, repoDir) - if err != nil { - t.Error(err) - } - - err = repo.Init() - if err != nil { - t.Error(err) - } - - v, err := repo.Version() - if err != nil { - t.Error(err) - } - if v != "0" { - t.Errorf("Bzr Init returns wrong version: %s", v) - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/errors_test.go b/installer/vendor/github.com/Masterminds/vcs/errors_test.go deleted file mode 100644 index 2effd7ccab..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/errors_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package vcs - -import ( - "errors" - "testing" -) - -func TestNewRemoteError(t *testing.T) { - base := errors.New("Foo error") - out := "This is a test" - msg := "remote error msg" - - e := NewRemoteError(msg, base, out) - - switch e.(type) { - case *RemoteError: - // This is the right error type - default: - t.Error("Wrong error type returned from NewRemoteError") - } -} - -func TestNewLocalError(t *testing.T) { - base := errors.New("Foo error") - out := "This is a test" - msg := "local error msg" - - e := NewLocalError(msg, base, out) - - switch e.(type) { - case *LocalError: - // This is the right error type - default: - t.Error("Wrong error type returned from NewLocalError") - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/git_test.go b/installer/vendor/github.com/Masterminds/vcs/git_test.go deleted file mode 100644 index b58c2c2efd..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/git_test.go +++ /dev/null @@ -1,599 +0,0 @@ -package vcs - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "time" - //"log" - "os" - "testing" -) - -// Canary test to ensure GitRepo implements the Repo interface. -var _ Repo = &GitRepo{} - -// To verify git is working we perform integration testing -// with a known git service. - -func TestGit(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-git-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo") - if err != nil { - t.Error(err) - } - - if repo.Vcs() != Git { - t.Error("Git is detecting the wrong type") - } - - // Check the basic getters. 
- if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo" { - t.Error("Remote not set properly") - } - if repo.LocalPath() != tempDir+"/VCSTestRepo" { - t.Error("Local disk location not set properly") - } - - //Logger = log.New(os.Stdout, "", log.LstdFlags) - - // Do an initial clone. - err = repo.Get() - if err != nil { - t.Errorf("Unable to clone Git repo. Err was %s", err) - } - - // Verify Git repo is a Git repo - if !repo.CheckLocal() { - t.Error("Problem checking out repo or Git CheckLocal is not working") - } - - // Test internal lookup mechanism used outside of Git specific functionality. - ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo") - if err != nil { - t.Error("detectVcsFromFS unable to Git repo") - } - if ltype != Git { - t.Errorf("detectVcsFromFS detected %s instead of Git type", ltype) - } - - // Test NewRepo on existing checkout. This should simply provide a working - // instance without error based on looking at the local directory. - nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo") - if nrerr != nil { - t.Error(nrerr) - } - // Verify the right oject is returned. It will check the local repo type. - if !nrepo.CheckLocal() { - t.Error("Wrong version returned from NewRepo") - } - - // Perform an update. - err = repo.Update() - if err != nil { - t.Error(err) - } - - v, err := repo.Current() - if err != nil { - t.Errorf("Error trying Git Current: %s", err) - } - if v != "master" { - t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v) - } - - // Set the version using the short hash. - err = repo.UpdateVersion("806b07b") - if err != nil { - t.Errorf("Unable to update Git repo version. Err was %s", err) - } - - // Once a ref has been checked out the repo is in a detached head state. - // Trying to pull in an update in this state will cause an error. Update - // should cleanly handle this. Pulling on a branch (tested elsewhere) and - // skipping that here. - err = repo.Update() - if err != nil { - t.Error(err) - } - - // Use Version to verify we are on the right version. - v, err = repo.Version() - if v != "806b07b08faa21cfbdae93027904f80174679402" { - t.Error("Error checking checked out Git version") - } - if err != nil { - t.Error(err) - } - - v, err = repo.Current() - if err != nil { - t.Errorf("Error trying Git Current for ref: %s", err) - } - if v != "806b07b08faa21cfbdae93027904f80174679402" { - t.Errorf("Current failed to detect Git on ref of branch. Got version: %s", v) - } - - // Use Date to verify we are on the right commit. - d, err := repo.Date() - if d.Format(longForm) != "2015-07-29 09:46:39 -0400" { - t.Error("Error checking checked out Git commit date") - } - if err != nil { - t.Error(err) - } - - // Verify that we can set the version something other than short hash - err = repo.UpdateVersion("master") - if err != nil { - t.Errorf("Unable to update Git repo version. Err was %s", err) - } - err = repo.UpdateVersion("806b07b08faa21cfbdae93027904f80174679402") - if err != nil { - t.Errorf("Unable to update Git repo version. 
Err was %s", err) - } - v, err = repo.Version() - if v != "806b07b08faa21cfbdae93027904f80174679402" { - t.Error("Error checking checked out Git version") - } - if err != nil { - t.Error(err) - } - - tags, err := repo.Tags() - if err != nil { - t.Error(err) - } - - var hasRelTag bool - var hasOffMasterTag bool - - for _, tv := range tags { - if tv == "1.0.0" { - hasRelTag = true - } else if tv == "off-master-tag" { - hasOffMasterTag = true - } - } - - if !hasRelTag { - t.Error("Git tags unable to find release tag on master") - } - if !hasOffMasterTag { - t.Error("Git tags did not fetch tags not on master") - } - - tags, err = repo.TagsFromCommit("74dd547545b7df4aa285bcec1b54e2b76f726395") - if err != nil { - t.Error(err) - } - if len(tags) != 0 { - t.Error("Git is incorrectly returning tags for a commit") - } - - tags, err = repo.TagsFromCommit("30605f6ac35fcb075ad0bfa9296f90a7d891523e") - if err != nil { - t.Error(err) - } - if len(tags) != 1 || tags[0] != "1.0.0" { - t.Error("Git is incorrectly returning tags for a commit") - } - - branches, err := repo.Branches() - if err != nil { - t.Error(err) - } - // The branches should be HEAD, master, other, and test. - if branches[3] != "test" { - t.Error("Git is incorrectly returning branches") - } - - if !repo.IsReference("1.0.0") { - t.Error("Git is reporting a reference is not one") - } - - if repo.IsReference("foo") { - t.Error("Git is reporting a non-existent reference is one") - } - - if repo.IsDirty() { - t.Error("Git incorrectly reporting dirty") - } - - ci, err := repo.CommitInfo("806b07b08faa21cfbdae93027904f80174679402") - if err != nil { - t.Error(err) - } - if ci.Commit != "806b07b08faa21cfbdae93027904f80174679402" { - t.Error("Git.CommitInfo wrong commit id") - } - if ci.Author != "Matt Farina " { - t.Error("Git.CommitInfo wrong author") - } - if ci.Message != "Update README.md" { - t.Error("Git.CommitInfo wrong message") - } - ti, err := time.Parse(time.RFC1123Z, "Wed, 29 Jul 2015 09:46:39 -0400") - if err != nil { - t.Error(err) - } - if !ti.Equal(ci.Date) { - t.Error("Git.CommitInfo wrong date") - } - - _, err = repo.CommitInfo("asdfasdfasdf") - if err != ErrRevisionUnavailable { - t.Error("Git didn't return expected ErrRevisionUnavailable") - } - - tempDir2, err := ioutil.TempDir("", "go-vcs-git-tests-export") - if err != nil { - t.Fatalf("Error creating temp directory: %s", err) - } - defer func() { - err = os.RemoveAll(tempDir2) - if err != nil { - t.Error(err) - } - }() - - exportDir := filepath.Join(tempDir2, "src") - - err = repo.ExportDir(exportDir) - if err != nil { - t.Errorf("Unable to export Git repo. Err was %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, "README.md")) - if err != nil { - t.Errorf("Error checking exported file in Git: %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs()))) - if err != nil { - if found := os.IsNotExist(err); !found { - t.Errorf("Error checking exported metadata in Git: %s", err) - } - } else { - t.Error("Error checking Git metadata. It exists.") - } -} - -func TestGitCheckLocal(t *testing.T) { - // Verify repo.CheckLocal fails for non-Git directories. 
- // TestGit is already checking on a valid repo - tempDir, err := ioutil.TempDir("", "go-vcs-git-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, _ := NewGitRepo("", tempDir) - if repo.CheckLocal() { - t.Error("Git CheckLocal does not identify non-Git location") - } - - // Test NewRepo when there's no local. This should simply provide a working - // instance without error based on looking at the remote localtion. - _, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo") - if nrerr != nil { - t.Error(nrerr) - } -} - -func TestGitPing(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-git-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir) - if err != nil { - t.Error(err) - } - - ping := repo.Ping() - if !ping { - t.Error("Git unable to ping working repo") - } - - repo, err = NewGitRepo("https://github.com/Masterminds/ihopethisneverexistsbecauseitshouldnt", tempDir) - if err != nil { - t.Error(err) - } - - ping = repo.Ping() - if ping { - t.Error("Git got a ping response from when it should not have") - } -} - -func TestGitInit(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-git-tests") - repoDir := tempDir + "/repo" - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewGitRepo(repoDir, repoDir) - if err != nil { - t.Error(err) - } - - err = repo.Init() - if err != nil { - t.Error(err) - } - - _, err = repo.RunFromDir("git", "status") - if err != nil { - t.Error(err) - } -} - -func TestGitSubmoduleHandling(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-git-submodule-tests") - if err != nil { - t.Fatal(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - dumplocal := func(err error) string { - if terr, ok := err.(*LocalError); ok { - return fmt.Sprintf("msg: %s\norig: %s\nout: %s", terr.Error(), terr.Original(), terr.Out()) - } - return err.Error() - } - - subdirExists := func(dir ...string) bool { - _, err := os.Stat(filepath.Join(append([]string{tempDir}, dir...)...)) - return err == nil - } - - // Initial clone should get version with two submodules, each of which have - // their own submodule - repo, err := NewGitRepo("https://github.com/sdboyer/subm", tempDir) - if err != nil { - t.Fatal(dumplocal(err)) - } - err = repo.Get() - if err != nil { - t.Fatalf("unable to clone Git repo. Err was %s", dumplocal(err)) - } - - // Verify we are on the right version. 
- v, err := repo.Version() - if v != "18e3a5f6fc7f6d577e732e7a5ab2caf990efbf8f" { - t.Fatalf("did not start from expected rev, tests could fail - bailing out (got %s)", v) - } - if err != nil { - t.Fatal(dumplocal(err)) - } - - if !subdirExists("subm1", ".git") { - t.Fatal("subm1 submodule does not exist on initial clone/checkout") - } - if !subdirExists("subm1", "dep-test", ".git") { - t.Fatal("dep-test submodule nested under subm1 does not exist on initial clone/checkout") - } - - if !subdirExists("subm-again", ".git") { - t.Fatal("subm-again submodule does not exist on initial clone/checkout") - } - if !subdirExists("subm-again", "dep-test", ".git") { - t.Fatal("dep-test submodule nested under subm-again does not exist on initial clone/checkout") - } - - // Now switch to version with no submodules, make sure they all go away - err = repo.UpdateVersion("e677f82015f72ac1c8fafa66b5463163b3597af2") - if err != nil { - t.Fatalf("checking out needed version failed with err: %s", dumplocal(err)) - } - - if subdirExists("subm1") { - t.Fatal("checking out version without submodule did not clean up immediate submodules") - } - if subdirExists("subm1", "dep-test") { - t.Fatal("checking out version without submodule did not clean up nested submodules") - } - if subdirExists("subm-again") { - t.Fatal("checking out version without submodule did not clean up immediate submodules") - } - if subdirExists("subm-again", "dep-test") { - t.Fatal("checking out version without submodule did not clean up nested submodules") - } - - err = repo.UpdateVersion("aaf7aa1bc4c3c682cc530eca8f80417088ee8540") - if err != nil { - t.Fatalf("checking out needed version failed with err: %s", dumplocal(err)) - } - - if !subdirExists("subm1", ".git") { - t.Fatal("checking out version with immediate submodule did not set up git subrepo") - } - - err = repo.UpdateVersion("6cc4669af468f3b4f16e7e96275ad01ade5b522f") - if err != nil { - t.Fatalf("checking out needed version failed with err: %s", dumplocal(err)) - } - - if !subdirExists("subm1", "dep-test", ".git") { - t.Fatal("checking out version with nested submodule did not set up nested git subrepo") - } - - err = repo.UpdateVersion("aaf7aa1bc4c3c682cc530eca8f80417088ee8540") - if err != nil { - t.Fatalf("checking out needed version failed with err: %s", dumplocal(err)) - } - - if subdirExists("subm1", "dep-test") { - t.Fatal("rolling back to version without nested submodule did not clean up the nested submodule") - } - - err = repo.UpdateVersion("18e3a5f6fc7f6d577e732e7a5ab2caf990efbf8f") - if err != nil { - t.Fatalf("checking out needed version failed with err: %s", dumplocal(err)) - } - - if !subdirExists("subm1", ".git") { - t.Fatal("subm1 submodule does not exist after switch from other commit") - } - if !subdirExists("subm1", "dep-test", ".git") { - t.Fatal("dep-test submodule nested under subm1 does not exist after switch from other commit") - } - - if !subdirExists("subm-again", ".git") { - t.Fatal("subm-again submodule does not exist after switch from other commit") - } - if !subdirExists("subm-again", "dep-test", ".git") { - t.Fatal("dep-test submodule nested under subm-again does not exist after switch from other commit") - } - -} - -func TestGitSubmoduleHandling2(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-git-submodule-tests2") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewGitRepo("https://github.com/cloudfoundry/sonde-go", 
tempDir+"/VCSTestRepo2") - if err != nil { - t.Error(err) - } - - if repo.Vcs() != Git { - t.Error("Git is detecting the wrong type") - } - - // Check the basic getters. - if repo.Remote() != "https://github.com/cloudfoundry/sonde-go" { - t.Error("Remote not set properly") - } - if repo.LocalPath() != tempDir+"/VCSTestRepo2" { - t.Error("Local disk location not set properly") - } - - //Logger = log.New(os.Stdout, "", log.LstdFlags) - - // Do an initial clone. - err = repo.Get() - if err != nil { - t.Errorf("Unable to clone Git repo. Err was %s", err) - } - - // Verify Git repo is a Git repo - if !repo.CheckLocal() { - t.Error("Problem checking out repo or Git CheckLocal is not working") - } - - // Test internal lookup mechanism used outside of Git specific functionality. - ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo2") - if err != nil { - t.Error("detectVcsFromFS unable to Git repo") - } - if ltype != Git { - t.Errorf("detectVcsFromFS detected %s instead of Git type", ltype) - } - - // Test NewRepo on existing checkout. This should simply provide a working - // instance without error based on looking at the local directory. - nrepo, nrerr := NewRepo("https://github.com/cloudfoundry/sonde-go", tempDir+"/VCSTestRepo2") - if nrerr != nil { - t.Error(nrerr) - } - // Verify the right oject is returned. It will check the local repo type. - if !nrepo.CheckLocal() { - t.Error("Wrong version returned from NewRepo") - } - - // Perform an update. - err = repo.Update() - if err != nil { - t.Error(err) - } - - v, err := repo.Current() - if err != nil { - t.Errorf("Error trying Git Current: %s", err) - } - if v != "master" { - t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v) - } - - - tempDir2, err := ioutil.TempDir("", "go-vcs-git-tests-export") - if err != nil { - t.Fatalf("Error creating temp directory: %s", err) - } - defer func() { - err = os.RemoveAll(tempDir2) - if err != nil { - t.Error(err) - } - }() - - exportDir := filepath.Join(tempDir2, "src") - - err = repo.ExportDir(exportDir) - if err != nil { - t.Errorf("Unable to export Git repo. Err was %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, "README.md")) - if err != nil { - t.Errorf("Error checking exported file in Git: %s", err) - } - - _, err = os.Stat(filepath.Join( filepath.Join(exportDir, "definitions"), "README.md")) - if err != nil { - t.Errorf("Error checking exported file in Git: %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs()))) - if err != nil { - if found := os.IsNotExist(err); !found { - t.Errorf("Error checking exported metadata in Git: %s", err) - } - } else { - t.Error("Error checking Git metadata. 
It exists.") - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/glide.yaml b/installer/vendor/github.com/Masterminds/vcs/glide.yaml deleted file mode 100644 index b96e0bd3e7..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: github.com/Masterminds/vcs -homepage: https://github.com/Masterminds/vcs -license: MIT -owners: -- name: Matt Farina - email: matt@mattfarina.com - homepage: https://www.mattfarina.com/ -import: [] diff --git a/installer/vendor/github.com/Masterminds/vcs/hg_test.go b/installer/vendor/github.com/Masterminds/vcs/hg_test.go deleted file mode 100644 index 6b19f72809..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/hg_test.go +++ /dev/null @@ -1,332 +0,0 @@ -package vcs - -import ( - "io/ioutil" - "path/filepath" - "strings" - "time" - //"log" - "os" - "testing" -) - -// Canary test to ensure HgRepo implements the Repo interface. -var _ Repo = &HgRepo{} - -// To verify hg is working we perform integration testing -// with a known hg service. - -func TestHg(t *testing.T) { - - tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo") - if err != nil { - t.Error(err) - } - - if repo.Vcs() != Hg { - t.Error("Hg is detecting the wrong type") - } - - // Check the basic getters. - if repo.Remote() != "https://bitbucket.org/mattfarina/testhgrepo" { - t.Error("Remote not set properly") - } - if repo.LocalPath() != tempDir+"/testhgrepo" { - t.Error("Local disk location not set properly") - } - - //Logger = log.New(os.Stdout, "", log.LstdFlags) - - // Do an initial clone. - err = repo.Get() - if err != nil { - t.Errorf("Unable to clone Hg repo. Err was %s", err) - } - - // Verify Hg repo is a Hg repo - if !repo.CheckLocal() { - t.Error("Problem checking out repo or Hg CheckLocal is not working") - } - - // Test internal lookup mechanism used outside of Hg specific functionality. - ltype, err := DetectVcsFromFS(tempDir + "/testhgrepo") - if err != nil { - t.Error("detectVcsFromFS unable to Hg repo") - } - if ltype != Hg { - t.Errorf("detectVcsFromFS detected %s instead of Hg type", ltype) - } - - // Test NewRepo on existing checkout. This should simply provide a working - // instance without error based on looking at the local directory. - nrepo, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo") - if nrerr != nil { - t.Error(nrerr) - } - // Verify the right oject is returned. It will check the local repo type. - if !nrepo.CheckLocal() { - t.Error("Wrong version returned from NewRepo") - } - - v, err := repo.Current() - if err != nil { - t.Errorf("Error trying Hg Current: %s", err) - } - if v != "default" { - t.Errorf("Current failed to detect Hg on tip of default. Got version: %s", v) - } - - // Set the version using the short hash. - err = repo.UpdateVersion("a5494ba2177f") - if err != nil { - t.Errorf("Unable to update Hg repo version. Err was %s", err) - } - - // Use Version to verify we are on the right version. 
- v, err = repo.Version() - if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" { - t.Errorf("Error checking checked out Hg version: %s", v) - } - if err != nil { - t.Error(err) - } - - v, err = repo.Current() - if err != nil { - t.Errorf("Error trying Hg Current for ref: %s", err) - } - if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" { - t.Errorf("Current failed to detect Hg on ref of branch. Got version: %s", v) - } - - // Use Date to verify we are on the right commit. - d, err := repo.Date() - if err != nil { - t.Error(err) - } - if d.Format(longForm) != "2015-07-30 16:14:08 -0400" { - t.Error("Error checking checked out Hg commit date. Got wrong date:", d) - } - - // Perform an update. - err = repo.Update() - if err != nil { - t.Error(err) - } - - v, err = repo.Version() - if v != "9c6ccbca73e8a1351c834f33f57f1f7a0329ad35" { - t.Errorf("Error checking checked out Hg version: %s", v) - } - if err != nil { - t.Error(err) - } - - tags, err := repo.Tags() - if err != nil { - t.Error(err) - } - if tags[1] != "1.0.0" { - t.Error("Hg tags is not reporting the correct version") - } - - tags, err = repo.TagsFromCommit("a5494ba2177f") - if err != nil { - t.Error(err) - } - if len(tags) != 0 { - t.Error("Hg is incorrectly returning tags for a commit") - } - - tags, err = repo.TagsFromCommit("d680e82228d2") - if err != nil { - t.Error(err) - } - if len(tags) != 1 || tags[0] != "1.0.0" { - t.Error("Hg is incorrectly returning tags for a commit") - } - - branches, err := repo.Branches() - if err != nil { - t.Error(err) - } - // The branches should be HEAD, master, and test. - if branches[0] != "test" { - t.Error("Hg is incorrectly returning branches") - } - - if !repo.IsReference("1.0.0") { - t.Error("Hg is reporting a reference is not one") - } - - if !repo.IsReference("test") { - t.Error("Hg is reporting a reference is not one") - } - - if repo.IsReference("foo") { - t.Error("Hg is reporting a non-existent reference is one") - } - - if repo.IsDirty() { - t.Error("Hg incorrectly reporting dirty") - } - - ci, err := repo.CommitInfo("a5494ba2177f") - if err != nil { - t.Error(err) - } - if ci.Commit != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" { - t.Error("Hg.CommitInfo wrong commit id") - } - if ci.Author != "Matt Farina " { - t.Error("Hg.CommitInfo wrong author") - } - if ci.Message != "A commit" { - t.Error("Hg.CommitInfo wrong message") - } - - ti := time.Unix(1438287248, 0) - if !ti.Equal(ci.Date) { - t.Error("Hg.CommitInfo wrong date") - } - - _, err = repo.CommitInfo("asdfasdfasdf") - if err != ErrRevisionUnavailable { - t.Error("Hg didn't return expected ErrRevisionUnavailable") - } - - tempDir2, err := ioutil.TempDir("", "go-vcs-hg-tests-export") - if err != nil { - t.Fatalf("Error creating temp directory: %s", err) - } - defer func() { - err = os.RemoveAll(tempDir2) - if err != nil { - t.Error(err) - } - }() - - exportDir := filepath.Join(tempDir2, "src") - - err = repo.ExportDir(exportDir) - if err != nil { - t.Errorf("Unable to export Hg repo. Err was %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, "Readme.md")) - if err != nil { - t.Errorf("Error checking exported file in Hg: %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs()))) - if err != nil { - if found := os.IsNotExist(err); !found { - t.Errorf("Error checking exported metadata in Hg: %s", err) - } - } else { - t.Error("Error checking Hg metadata. It exists.") - } -} - -func TestHgCheckLocal(t *testing.T) { - // Verify repo.CheckLocal fails for non-Hg directories. 
- // TestHg is already checking on a valid repo - tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, _ := NewHgRepo("", tempDir) - if repo.CheckLocal() { - t.Error("Hg CheckLocal does not identify non-Hg location") - } - - // Test NewRepo when there's no local. This should simply provide a working - // instance without error based on looking at the remote localtion. - _, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo") - if nrerr != nil { - t.Error(nrerr) - } -} - -func TestHgPing(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir) - if err != nil { - t.Error(err) - } - - ping := repo.Ping() - if !ping { - t.Error("Hg unable to ping working repo") - } - - repo, err = NewHgRepo("https://bitbucket.org/mattfarina/ihopethisneverexistsbecauseitshouldnt", tempDir) - if err != nil { - t.Error(err) - } - - ping = repo.Ping() - if ping { - t.Error("Hg got a ping response from when it should not have") - } -} - -func TestHgInit(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests") - repoDir := tempDir + "/repo" - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewHgRepo(repoDir, repoDir) - if err != nil { - t.Error(err) - } - - err = repo.Init() - if err != nil { - t.Error(err) - } - - v, err := repo.Version() - if err != nil { - t.Error(err) - } - if !strings.HasPrefix(v, "000000") { - t.Errorf("Hg Init reporting wrong initial version: %s", v) - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/repo_test.go b/installer/vendor/github.com/Masterminds/vcs/repo_test.go deleted file mode 100644 index 8c083b3fc4..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/repo_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package vcs - -import ( - "fmt" - "io/ioutil" - "os" - "testing" -) - -func ExampleNewRepo() { - remote := "https://github.com/Masterminds/vcs" - local, _ := ioutil.TempDir("", "go-vcs") - repo, _ := NewRepo(remote, local) - // Returns: instance of GitRepo - - repo.Vcs() - // Returns Git as this is a Git repo - - err := repo.Get() - // Pulls down a repo, or a checkout in the case of SVN, and returns an - // error if that didn't happen successfully. - if err != nil { - fmt.Println(err) - } - - err = repo.UpdateVersion("master") - // Checkouts out a specific version. In most cases this can be a commit id, - // branch, or tag. - if err != nil { - fmt.Println(err) - } -} - -func TestTypeSwitch(t *testing.T) { - - // To test repo type switching we checkout as SVN and then try to get it as - // a git repo afterwards. - tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+string(os.PathSeparator)+"VCSTestRepo") - if err != nil { - t.Error(err) - } - err = repo.Get() - if err != nil { - t.Errorf("Unable to checkout SVN repo for repo switching tests. 
Err was %s", err) - } - - _, err = NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+string(os.PathSeparator)+"VCSTestRepo") - if err != ErrWrongVCS { - t.Errorf("Not detecting repo switch from SVN to Git") - } -} - -func TestDepInstalled(t *testing.T) { - i := depInstalled("git") - if !i { - t.Error("depInstalled not finding installed dep.") - } - - i = depInstalled("thisreallyisntinstalled") - if i { - t.Error("depInstalled finding not installed dep.") - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/svn_test.go b/installer/vendor/github.com/Masterminds/vcs/svn_test.go deleted file mode 100644 index 93fc139ab9..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/svn_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package vcs - -import ( - "io/ioutil" - "path/filepath" - "time" - //"log" - "os" - "testing" -) - -// To verify svn is working we perform integration testing -// with a known svn service. - -// Canary test to ensure SvnRepo implements the Repo interface. -var _ Repo = &SvnRepo{} - -func TestSvn(t *testing.T) { - - tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+string(os.PathSeparator)+"VCSTestRepo") - if err != nil { - t.Error(err) - } - - if repo.Vcs() != Svn { - t.Error("Svn is detecting the wrong type") - } - - // Check the basic getters. - if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo/trunk" { - t.Error("Remote not set properly") - } - if repo.LocalPath() != tempDir+string(os.PathSeparator)+"VCSTestRepo" { - t.Error("Local disk location not set properly") - } - - //Logger = log.New(os.Stdout, "", log.LstdFlags) - - // Do an initial checkout. - err = repo.Get() - if err != nil { - t.Errorf("Unable to checkout SVN repo. Err was %s", err) - } - - // Verify SVN repo is a SVN repo - if !repo.CheckLocal() { - t.Error("Problem checking out repo or SVN CheckLocal is not working") - } - - // Verify an incorrect remote is caught when NewSvnRepo is used on an existing location - _, nrerr := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/unknownbranch", tempDir+"/VCSTestRepo") - if nrerr != ErrWrongRemote { - t.Error("ErrWrongRemote was not triggered for SVN") - } - - // Test internal lookup mechanism used outside of Hg specific functionality. - ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo") - if err != nil { - t.Error("detectVcsFromFS unable to Svn repo") - } - if ltype != Svn { - t.Errorf("detectVcsFromFS detected %s instead of Svn type", ltype) - } - - // Commenting out auto-detection tests for SVN. NewRepo automatically detects - // GitHub to be a Git repo and that's an issue for this test. Need an - // SVN host that can autodetect from before using this test again. - // - // Test NewRepo on existing checkout. This should simply provide a working - // instance without error based on looking at the local directory. - // nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo") - // if nrerr != nil { - // t.Error(nrerr) - // } - // // Verify the right oject is returned. It will check the local repo type. 
- // if nrepo.CheckLocal() == false { - // t.Error("Wrong version returned from NewRepo") - // } - - v, err := repo.Current() - if err != nil { - t.Errorf("Error trying Svn Current: %s", err) - } - if v != "HEAD" { - t.Errorf("Current failed to detect Svn on HEAD. Got version: %s", v) - } - - // Update the version to a previous version. - err = repo.UpdateVersion("r2") - if err != nil { - t.Errorf("Unable to update SVN repo version. Err was %s", err) - } - - // Use Version to verify we are on the right version. - v, err = repo.Version() - if v != "2" { - t.Error("Error checking checked SVN out version") - } - if err != nil { - t.Error(err) - } - - v, err = repo.Current() - if err != nil { - t.Errorf("Error trying Svn Current for ref: %s", err) - } - if v != "2" { - t.Errorf("Current failed to detect Svn on HEAD. Got version: %s", v) - } - - // Perform an update which should take up back to the latest version. - err = repo.Update() - if err != nil { - t.Error(err) - } - - // Make sure we are on a newer version because of the update. - v, err = repo.Version() - if v == "2" { - t.Error("Error with version. Still on old version. Update failed") - } - if err != nil { - t.Error(err) - } - - // Use Date to verify we are on the right commit. - d, err := repo.Date() - if d.Format(longForm) != "2015-07-29 13:47:03 +0000" { - t.Error("Error checking checked out Svn commit date") - } - if err != nil { - t.Error(err) - } - - tags, err := repo.Tags() - if err != nil { - t.Error(err) - } - if len(tags) != 0 { - t.Error("Svn is incorrectly returning tags") - } - - tags, err = repo.TagsFromCommit("2") - if err != nil { - t.Error(err) - } - if len(tags) != 0 { - t.Error("Svn is incorrectly returning tags for a commit") - } - - branches, err := repo.Branches() - if err != nil { - t.Error(err) - } - if len(branches) != 0 { - t.Error("Svn is incorrectly returning branches") - } - - if !repo.IsReference("r4") { - t.Error("Svn is reporting a reference is not one") - } - - if repo.IsReference("55") { - t.Error("Svn is reporting a non-existent reference is one") - } - - if repo.IsDirty() { - t.Error("Svn incorrectly reporting dirty") - } - - ci, err := repo.CommitInfo("2") - if err != nil { - t.Error(err) - } - if ci.Commit != "2" { - t.Error("Svn.CommitInfo wrong commit id") - } - if ci.Author != "matt.farina" { - t.Error("Svn.CommitInfo wrong author") - } - if ci.Message != "Update README.md" { - t.Error("Svn.CommitInfo wrong message") - } - ti, err := time.Parse(time.RFC3339Nano, "2015-07-29T13:46:20.000000Z") - if err != nil { - t.Error(err) - } - if !ti.Equal(ci.Date) { - t.Error("Svn.CommitInfo wrong date") - } - - _, err = repo.CommitInfo("555555555") - if err != ErrRevisionUnavailable { - t.Error("Svn didn't return expected ErrRevisionUnavailable") - } - - tempDir2, err := ioutil.TempDir("", "go-vcs-svn-tests-export") - if err != nil { - t.Fatalf("Error creating temp directory: %s", err) - } - defer func() { - err = os.RemoveAll(tempDir2) - if err != nil { - t.Error(err) - } - }() - - exportDir := filepath.Join(tempDir2, "src") - - err = repo.ExportDir(exportDir) - if err != nil { - t.Errorf("Unable to export Svn repo. 
Err was %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, "README.md")) - if err != nil { - t.Errorf("Error checking exported file in Svn: %s", err) - } - - _, err = os.Stat(filepath.Join(exportDir, string(repo.Vcs()))) - if err != nil { - if found := os.IsNotExist(err); !found { - t.Errorf("Error checking exported metadata in Svn: %s", err) - } - } else { - t.Error("Error checking Svn metadata. It exists.") - } -} - -func TestSvnCheckLocal(t *testing.T) { - // Verify repo.CheckLocal fails for non-SVN directories. - // TestSvn is already checking on a valid repo - tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, _ := NewSvnRepo("", tempDir) - if repo.CheckLocal() { - t.Error("SVN CheckLocal does not identify non-SVN location") - } - - // Test NewRepo when there's no local. This should simply provide a working - // instance without error based on looking at the remote localtion. - _, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo") - if nrerr != nil { - t.Error(nrerr) - } -} - -func TestSvnPing(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir) - if err != nil { - t.Error(err) - } - - ping := repo.Ping() - if !ping { - t.Error("Svn unable to ping working repo") - } - - repo, err = NewSvnRepo("https://github.com/Masterminds/ihopethisneverexistsbecauseitshouldnt", tempDir) - if err != nil { - t.Error(err) - } - - ping = repo.Ping() - if ping { - t.Error("Svn got a ping response from when it should not have") - } -} - -func TestSvnInit(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") - remoteDir := tempDir + string(os.PathSeparator) + "remoteDir" - localDir := tempDir + string(os.PathSeparator) + "localDir" - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - repo, err := NewSvnRepo(remoteDir, localDir) - if err != nil { - t.Error(err) - } - - err = repo.Init() - if err != nil { - t.Error(err) - } - - err = repo.Get() - if err != nil { - t.Error(err) - } - - v, err := repo.Version() - if err != nil { - t.Error(err) - } - if v != "0" { - t.Errorf("Svn Init returns wrong version: %s", v) - } -} diff --git a/installer/vendor/github.com/Masterminds/vcs/vcs_remote_lookup_test.go b/installer/vendor/github.com/Masterminds/vcs/vcs_remote_lookup_test.go deleted file mode 100644 index 938cb0ebc5..0000000000 --- a/installer/vendor/github.com/Masterminds/vcs/vcs_remote_lookup_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package vcs - -import ( - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "testing" -) - -func TestVCSLookup(t *testing.T) { - // TODO: Expand to make sure it detected the right vcs. 
- urlList := map[string]struct { - work bool - t Type - }{ - "https://github.com/masterminds": {work: false, t: Git}, - "https://github.com/Masterminds/VCSTestRepo": {work: true, t: Git}, - "https://bitbucket.org/mattfarina/testhgrepo": {work: true, t: Hg}, - "https://bitbucket.org/mattfarina/repo-does-not-exist": {work: false, t: Hg}, - "https://bitbucket.org/mattfarina/private-repo-for-vcs-testing": {work: false, t: Hg}, - "https://launchpad.net/govcstestbzrrepo/trunk": {work: true, t: Bzr}, - "https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo": {work: true, t: Bzr}, - "https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo/trunk": {work: true, t: Bzr}, - "https://git.launchpad.net/govcstestgitrepo": {work: true, t: Git}, - "https://git.launchpad.net/~mattfarina/+git/mygovcstestgitrepo": {work: true, t: Git}, - "https://hub.jazz.net/git/user1/pkgname": {work: true, t: Git}, - "https://hub.jazz.net/git/user1/pkgname/subpkg/subpkg/subpkg": {work: true, t: Git}, - "https://hubs.jazz.net/git/user1/pkgname": {work: false, t: Git}, - "https://example.com/foo/bar.git": {work: true, t: Git}, - "https://example.com/foo/bar.svn": {work: true, t: Svn}, - "https://example.com/foo/bar/baz.bzr": {work: true, t: Bzr}, - "https://example.com/foo/bar/baz.hg": {work: true, t: Hg}, - "https://gopkg.in/tomb.v1": {work: true, t: Git}, - "https://golang.org/x/net": {work: true, t: Git}, - "https://speter.net/go/exp/math/dec/inf": {work: true, t: Git}, - "https://git.openstack.org/foo/bar": {work: true, t: Git}, - "git@github.com:Masterminds/vcs.git": {work: true, t: Git}, - "git@example.com:foo.git": {work: true, t: Git}, - "ssh://hg@bitbucket.org/mattfarina/testhgrepo": {work: true, t: Hg}, - "git@bitbucket.org:mattfarina/glide-bitbucket-example.git": {work: true, t: Git}, - "git+ssh://example.com/foo/bar": {work: true, t: Git}, - "git://example.com/foo/bar": {work: true, t: Git}, - "bzr+ssh://example.com/foo/bar": {work: true, t: Bzr}, - "svn+ssh://example.com/foo/bar": {work: true, t: Svn}, - "git@example.com:foo/bar": {work: true, t: Git}, - "hg@example.com:foo/bar": {work: true, t: Hg}, - } - - for u, c := range urlList { - ty, _, err := detectVcsFromRemote(u) - if err == nil && !c.work { - t.Errorf("Error detecting VCS from URL(%s)", u) - } - - if err == ErrCannotDetectVCS && c.work { - t.Errorf("Error detecting VCS from URL(%s)", u) - } - - if err != nil && c.work { - t.Errorf("Error detecting VCS from URL(%s): %s", u, err) - } - - if err != nil && - err != ErrCannotDetectVCS && - !strings.HasSuffix(err.Error(), "Not Found") && - !strings.HasSuffix(err.Error(), "Access Denied") && - !c.work { - t.Errorf("Unexpected error returned (%s): %s", u, err) - } - - if c.work && ty != c.t { - t.Errorf("Incorrect VCS type returned(%s)", u) - } - } -} - -func TestVCSFileLookup(t *testing.T) { - tempDir, err := ioutil.TempDir("", "go-vcs-file-lookup-tests") - if err != nil { - t.Error(err) - } - defer func() { - err = os.RemoveAll(tempDir) - if err != nil { - t.Error(err) - } - }() - - _, err = exec.Command("git", "init", tempDir).CombinedOutput() - if err != nil { - t.Error(err) - } - - // On Windows it should be file:// followed by /C:\for\bar. That / before - // the drive needs to be included in testing. 
- var pth string - if runtime.GOOS == "windows" { - pth = "file:///" + tempDir - } else { - pth = "file://" + tempDir - } - ty, _, err := detectVcsFromRemote(pth) - - if err != nil { - t.Errorf("Unable to detect file:// path: %s", err) - } - - if ty != Git { - t.Errorf("Detected wrong type from file:// path. Found type %v", ty) - } -} - -func TestNotFound(t *testing.T) { - _, _, err := detectVcsFromRemote("https://mattfarina.com/notfound") - if err == nil || !strings.HasSuffix(err.Error(), " Not Found") { - t.Errorf("Failed to find not found repo") - } - - _, err = NewRepo("https://mattfarina.com/notfound", "") - if err == nil || !strings.HasSuffix(err.Error(), " Not Found") { - t.Errorf("Failed to find not found repo") - } -} - -func TestAccessDenied(t *testing.T) { - _, _, err := detectVcsFromRemote("https://bitbucket.org/mattfarina/private-repo-for-vcs-testing") - if err == nil || err.Error() != "Access Denied" { - t.Errorf("Failed to detect access denied") - } - - _, err = NewRepo("https://bitbucket.org/mattfarina/private-repo-for-vcs-testing", "") - if err == nil || err.Error() != "Access Denied" { - t.Errorf("Failed to detect access denied") - } -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/.gitignore b/installer/vendor/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a005..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/installer/vendor/github.com/Sirupsen/logrus/.travis.yml b/installer/vendor/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index dee4eb2cc7..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - 1.5 - - 1.6 - - tip -install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... 
diff --git a/installer/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/installer/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index f2c2bc2111..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/installer/vendor/github.com/Sirupsen/logrus/README.md b/installer/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 126cd1fc2b..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,425 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). 
- -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| 
[Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | - - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. - -Third party logging formatters: - -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. 
`entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -logger, hook := NewNullLogger() -logger.Error("Hello error") - -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) - -hook.Reset() -assert.Nil(hook.LastEntry()) -``` - -#### Fatal handlers - -Logrus can register one or more functions that will be called when any `fatal` -level message is logged. The registered handlers will be executed before -logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need -to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted. - -``` -... -handler := func() { - // gracefully shutdown something... -} -logrus.RegisterExitHandler(handler) -... -``` - -#### Thread safty - -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. -If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
- -Situation when locking is not needed includes: - -* You have no hooks registered, or hooks calling is already thread-safe. - -* Writing to logger.Out is already thread-safe, for example: - - 1) logger.Out is protected by locks. - - 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing) - - (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/installer/vendor/github.com/Sirupsen/logrus/alt_exit.go b/installer/vendor/github.com/Sirupsen/logrus/alt_exit.go index b4c9e84754..8af90637a9 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/alt_exit.go +++ b/installer/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -1,7 +1,7 @@ package logrus // The following code was sourced and modified from the -// https://bitbucket.org/tebeka/atexit package governed by the following license: +// https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka . // diff --git a/installer/vendor/github.com/Sirupsen/logrus/alt_exit_test.go b/installer/vendor/github.com/Sirupsen/logrus/alt_exit_test.go deleted file mode 100644 index 022b778303..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/alt_exit_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package logrus - -import ( - "io/ioutil" - "os/exec" - "testing" - "time" -) - -func TestRegister(t *testing.T) { - current := len(handlers) - RegisterExitHandler(func() {}) - if len(handlers) != current+1 { - t.Fatalf("can't add handler") - } -} - -func TestHandler(t *testing.T) { - gofile := "/tmp/testprog.go" - if err := ioutil.WriteFile(gofile, testprog, 0666); err != nil { - t.Fatalf("can't create go file") - } - - outfile := "/tmp/testprog.out" - arg := time.Now().UTC().String() - err := exec.Command("go", "run", gofile, outfile, arg).Run() - if err == nil { - t.Fatalf("completed normally, should have failed") - } - - data, err := ioutil.ReadFile(outfile) - if err != nil { - t.Fatalf("can't read output file %s", outfile) - } - - if string(data) != arg { - t.Fatalf("bad data") - } -} - -var testprog = []byte(` -// Test program for atexit, gets output file and data as arguments and writes -// data to output file in atexit handler. 
-package main - -import ( - "github.com/Sirupsen/logrus" - "flag" - "fmt" - "io/ioutil" -) - -var outfile = "" -var data = "" - -func handler() { - ioutil.WriteFile(outfile, []byte(data), 0666) -} - -func badHandler() { - n := 0 - fmt.Println(1/n) -} - -func main() { - flag.Parse() - outfile = flag.Arg(0) - data = flag.Arg(1) - - logrus.RegisterExitHandler(handler) - logrus.RegisterExitHandler(badHandler) - logrus.Fatal("Bye bye") -} -`) diff --git a/installer/vendor/github.com/Sirupsen/logrus/doc.go b/installer/vendor/github.com/Sirupsen/logrus/doc.go index dddd5f877b..da67aba06d 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/doc.go +++ b/installer/vendor/github.com/Sirupsen/logrus/doc.go @@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger: Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 -For a full guide visit https://github.com/Sirupsen/logrus +For a full guide visit https://github.com/sirupsen/logrus */ package logrus diff --git a/installer/vendor/github.com/Sirupsen/logrus/entry.go b/installer/vendor/github.com/Sirupsen/logrus/entry.go index 4edbe7a2de..778f4c9f0d 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/entry.go +++ b/installer/vendor/github.com/Sirupsen/logrus/entry.go @@ -35,6 +35,7 @@ type Entry struct { Time time.Time // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. Level Level // Message passed to Debug, Info, Warn, Error, Fatal or Panic @@ -93,40 +94,52 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } + entry.fireHooks() + buffer = bufferPool.Get().(*bytes.Buffer) buffer.Reset() defer bufferPool.Put(buffer) entry.Buffer = buffer - serialized, err := entry.Logger.Formatter.Format(&entry) + + entry.write() + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, &entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() if err != nil { - entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() } else { - entry.Logger.mu.Lock() _, err = entry.Logger.Out.Write(serialized) if err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } - entry.Logger.mu.Unlock() - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) } } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -136,13 +149,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -152,20 +165,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -174,13 +187,13 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } @@ -190,7 +203,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -200,20 +213,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { 
entry.Fatal(fmt.Sprintf(format, args...)) } Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -221,13 +234,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } @@ -237,7 +250,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } @@ -247,20 +260,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/installer/vendor/github.com/Sirupsen/logrus/entry_test.go b/installer/vendor/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index 99c3b41d5f..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestEntryWithError(t *testing.T) { - - assert := assert.New(t) - - defer func() { - ErrorKey = "error" - }() - - err := fmt.Errorf("kaboom at layer %d", 4711) - - assert.Equal(err, WithError(err).Data["error"]) - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - - assert.Equal(err, entry.WithError(err).Data["error"]) - - ErrorKey = "err" - - assert.Equal(err, entry.WithError(err).Data["err"]) - -} - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go b/installer/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go deleted file mode 100644 
index a1623ec003..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/examples/basic/basic.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.JSONFormatter) - log.Formatter = new(logrus.TextFormatter) // default - log.Level = logrus.DebugLevel -} - -func main() { - defer func() { - err := recover() - if err != nil { - log.WithFields(logrus.Fields{ - "omg": true, - "err": err, - "number": 100, - }).Fatal("The ice breaks!") - } - }() - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "number": 8, - }).Debug("Started observing beach") - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "temperature": -4, - }).Debug("Temperature changes") - - log.WithFields(logrus.Fields{ - "animal": "orca", - "size": 9009, - }).Panic("It's over 9000!") -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go b/installer/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go deleted file mode 100644 index 3187f6d3e1..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/examples/hook/hook.go +++ /dev/null @@ -1,30 +0,0 @@ -package main - -import ( - "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" -) - -var log = logrus.New() - -func init() { - log.Formatter = new(logrus.TextFormatter) // default - log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) -} - -func main() { - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(logrus.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/exported.go b/installer/vendor/github.com/Sirupsen/logrus/exported.go index 9a0120ac1d..013183edab 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/exported.go +++ b/installer/vendor/github.com/Sirupsen/logrus/exported.go @@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.Level = level + std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() - return std.Level + return std.level() } // AddHook adds a hook to the standard logger hooks. diff --git a/installer/vendor/github.com/Sirupsen/logrus/formatter.go b/installer/vendor/github.com/Sirupsen/logrus/formatter.go index b5fbe934d1..b183ff5b1d 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/formatter.go +++ b/installer/vendor/github.com/Sirupsen/logrus/formatter.go @@ -2,7 +2,7 @@ package logrus import "time" -const DefaultTimestampFormat = time.RFC3339 +const defaultTimestampFormat = time.RFC3339 // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. 
It exposes all the fields, including the default ones: diff --git a/installer/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/installer/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c77f..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/hook_test.go b/installer/vendor/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 13f34cb6f8..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - 
ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md b/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md deleted file mode 100644 index 066704b370..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Syslog Hooks for Logrus :walrus: - -## Usage - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` - -If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. - -```go -import ( - "log/syslog" - "github.com/Sirupsen/logrus" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" -) - -func main() { - log := logrus.New() - hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") - - if err == nil { - log.Hooks.Add(hook) - } -} -``` \ No newline at end of file diff --git a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go deleted file mode 100644 index a36e20032e..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build !windows,!nacl,!plan9 - -package logrus_syslog - -import ( - "fmt" - "github.com/Sirupsen/logrus" - "log/syslog" - "os" -) - -// SyslogHook to send logs via syslog. -type SyslogHook struct { - Writer *syslog.Writer - SyslogNetwork string - SyslogRaddr string -} - -// Creates a hook to be added to an instance of logger. 
This is called with -// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` -// `if err == nil { log.Hooks.Add(hook) }` -func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { - w, err := syslog.Dial(network, raddr, priority, tag) - return &SyslogHook{w, network, raddr}, err -} - -func (hook *SyslogHook) Fire(entry *logrus.Entry) error { - line, err := entry.String() - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) - return err - } - - switch entry.Level { - case logrus.PanicLevel: - return hook.Writer.Crit(line) - case logrus.FatalLevel: - return hook.Writer.Crit(line) - case logrus.ErrorLevel: - return hook.Writer.Err(line) - case logrus.WarnLevel: - return hook.Writer.Warning(line) - case logrus.InfoLevel: - return hook.Writer.Info(line) - case logrus.DebugLevel: - return hook.Writer.Debug(line) - default: - return nil - } -} - -func (hook *SyslogHook) Levels() []logrus.Level { - return logrus.AllLevels -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go deleted file mode 100644 index 42762dc10d..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package logrus_syslog - -import ( - "github.com/Sirupsen/logrus" - "log/syslog" - "testing" -) - -func TestLocalhostAddAndPrint(t *testing.T) { - log := logrus.New() - hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - - if err != nil { - t.Errorf("Unable to connect to local syslog.") - } - - log.Hooks.Add(hook) - - for _, level := range hook.Levels() { - if len(log.Hooks[level]) != 1 { - t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) - } - } - - log.Info("Congratulations!") -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test.go b/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test.go deleted file mode 100644 index 068812535d..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test.go +++ /dev/null @@ -1,67 +0,0 @@ -package test - -import ( - "io/ioutil" - - "github.com/Sirupsen/logrus" -) - -// test.Hook is a hook designed for dealing with logs in test scenarios. -type Hook struct { - Entries []*logrus.Entry -} - -// Installs a test hook for the global logger. -func NewGlobal() *Hook { - - hook := new(Hook) - logrus.AddHook(hook) - - return hook - -} - -// Installs a test hook for a given local logger. -func NewLocal(logger *logrus.Logger) *Hook { - - hook := new(Hook) - logger.Hooks.Add(hook) - - return hook - -} - -// Creates a discarding logger and installs the test hook. -func NewNullLogger() (*logrus.Logger, *Hook) { - - logger := logrus.New() - logger.Out = ioutil.Discard - - return logger, NewLocal(logger) - -} - -func (t *Hook) Fire(e *logrus.Entry) error { - t.Entries = append(t.Entries, e) - return nil -} - -func (t *Hook) Levels() []logrus.Level { - return logrus.AllLevels -} - -// LastEntry returns the last entry that was logged or nil. -func (t *Hook) LastEntry() (l *logrus.Entry) { - - if i := len(t.Entries) - 1; i < 0 { - return nil - } else { - return t.Entries[i] - } - -} - -// Reset removes all Entries from this test hook. 
-func (t *Hook) Reset() { - t.Entries = make([]*logrus.Entry, 0) -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go b/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go deleted file mode 100644 index d69455ba04..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/hooks/test/test_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package test - -import ( - "testing" - - "github.com/Sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestAllHooks(t *testing.T) { - - assert := assert.New(t) - - logger, hook := NewNullLogger() - assert.Nil(hook.LastEntry()) - assert.Equal(0, len(hook.Entries)) - - logger.Error("Hello error") - assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal("Hello error", hook.LastEntry().Message) - assert.Equal(1, len(hook.Entries)) - - logger.Warn("Hello warning") - assert.Equal(logrus.WarnLevel, hook.LastEntry().Level) - assert.Equal("Hello warning", hook.LastEntry().Message) - assert.Equal(2, len(hook.Entries)) - - hook.Reset() - assert.Nil(hook.LastEntry()) - assert.Equal(0, len(hook.Entries)) - - hook = NewGlobal() - - logrus.Error("Hello error") - assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) - assert.Equal("Hello error", hook.LastEntry().Message) - assert.Equal(1, len(hook.Entries)) - -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/json_formatter.go b/installer/vendor/github.com/Sirupsen/logrus/json_formatter.go index 2ad6dc5cf4..fb01c1b104 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ b/installer/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -5,18 +5,54 @@ import ( "fmt" ) +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +// Default key names for the default fields +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json type JSONFormatter struct { // TimestampFormat sets the format used for marshaling timestamps. TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for default fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap } +// Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data := make(Fields, len(entry.Data)+3) for k, v := range entry.Data { switch v := v.(type) { case error: // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 + // https://github.com/sirupsen/logrus/issues/137 data[k] = v.Error() default: data[k] = v @@ -26,12 +62,14 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() serialized, err := json.Marshal(data) if err != nil { diff --git a/installer/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/installer/vendor/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d70873254..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/logger.go b/installer/vendor/github.com/Sirupsen/logrus/logger.go index b769f3d352..fdaf8a6534 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/logger.go +++ b/installer/vendor/github.com/Sirupsen/logrus/logger.go @@ -4,6 +4,7 @@ import ( "io" "os" "sync" + "sync/atomic" ) type Logger struct { @@ -24,7 +25,7 @@ type Logger struct { Formatter Formatter // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in + // logged. Level Level // Used to sync writing to the log. Locking is enabled by Default mu MutexWrap @@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugf(format, args...) logger.releaseEntry(entry) @@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) { } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infof(format, args...) logger.releaseEntry(entry) @@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) { } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) 
logger.releaseEntry(entry) @@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) { } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorf(format, args...) logger.releaseEntry(entry) @@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) { } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalf(format, args...) logger.releaseEntry(entry) @@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicf(format, args...) logger.releaseEntry(entry) @@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debug(args...) logger.releaseEntry(entry) @@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) { } func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Info(args...) logger.releaseEntry(entry) @@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) { } func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) { } func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Error(args...) logger.releaseEntry(entry) @@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) { } func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatal(args...) logger.releaseEntry(entry) @@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) { } func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panic(args...) logger.releaseEntry(entry) @@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) { } func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugln(args...) logger.releaseEntry(entry) @@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) { } func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infoln(args...) 
logger.releaseEntry(entry) @@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) { } func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) { } func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorln(args...) logger.releaseEntry(entry) @@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) { } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalln(args...) logger.releaseEntry(entry) @@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { } func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicln(args...) logger.releaseEntry(entry) @@ -306,3 +307,17 @@ func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) SetNoLock() { logger.mu.Disable() } + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} diff --git a/installer/vendor/github.com/Sirupsen/logrus/logger_bench_test.go b/installer/vendor/github.com/Sirupsen/logrus/logger_bench_test.go deleted file mode 100644 index dd23a3535e..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/logger_bench_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "os" - "testing" -) - -// smallFields is a small size data set for benchmarking -var loggerFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -func BenchmarkDummyLogger(b *testing.B) { - nullf, err := os.OpenFile("/dev/null", os.O_WRONLY, 0666) - if err != nil { - b.Fatalf("%v", err) - } - defer nullf.Close() - doLoggerBenchmark(b, nullf, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkDummyLoggerNoLock(b *testing.B) { - nullf, err := os.OpenFile("/dev/null", os.O_WRONLY|os.O_APPEND, 0666) - if err != nil { - b.Fatalf("%v", err) - } - defer nullf.Close() - doLoggerBenchmarkNoLock(b, nullf, &TextFormatter{DisableColors: true}, smallFields) -} - -func doLoggerBenchmark(b *testing.B, out *os.File, formatter Formatter, fields Fields) { - logger := Logger{ - Out: out, - Level: InfoLevel, - Formatter: formatter, - } - entry := logger.WithFields(fields) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - entry.Info("aaa") - } - }) -} - -func doLoggerBenchmarkNoLock(b *testing.B, out *os.File, formatter Formatter, fields Fields) { - logger := Logger{ - Out: out, - Level: InfoLevel, - Formatter: formatter, - } - logger.SetNoLock() - entry := logger.WithFields(fields) - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - entry.Info("aaa") - } - }) -} diff --git 
a/installer/vendor/github.com/Sirupsen/logrus/logrus.go b/installer/vendor/github.com/Sirupsen/logrus/logrus.go index e596691116..dd38999741 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/installer/vendor/github.com/Sirupsen/logrus/logrus.go @@ -10,7 +10,7 @@ import ( type Fields map[string]interface{} // Level type -type Level uint8 +type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { diff --git a/installer/vendor/github.com/Sirupsen/logrus/logrus_test.go b/installer/vendor/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index bfc478055e..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,361 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - 
}, func(fields Fields) { - assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, 
"debug", DebugLevel.String()) - assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("PANIC") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("FATAL") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("ERROR") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("WARN") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("WARNING") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("INFO") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("DEBUG") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} - -func TestLoggingRace(t *testing.T) { - logger := New() - - var wg sync.WaitGroup - wg.Add(100) - - for i := 0; i < 100; i++ { - go func() { - logger.Info("info") - wg.Done() - }() - } - wg.Wait() -} - -// Compile test -func TestLogrusInterface(t *testing.T) { - var buffer bytes.Buffer - fn := func(l FieldLogger) { - b := l.WithField("key", "value") - b.Debug("Test") - } - // test logger - logger := New() - logger.Out = &buffer - fn(logger) - - // test Entry - e := logger.WithField("another", "value") - fn(e) -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_appengine.go deleted file mode 100644 index 1960169ef2..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_appengine.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build appengine - -package logrus - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { - return true -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_bsd.go index 5f6be4d3c0..4880d13d26 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ b/installer/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -1,10 +1,10 @@ // +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine +// +build !appengine,!gopherjs package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TIOCGETA +const ioctlReadTermios = unix.TIOCGETA -type Termios syscall.Termios +type Termios unix.Termios diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 0000000000..3de08e802f --- /dev/null +++ b/installer/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine gopherjs + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 0000000000..067047a123 --- /dev/null +++ b/installer/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,19 @@ +// +build !appengine,!gopherjs + +package logrus + +import ( + "io" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return terminal.IsTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_linux.go index 308160ca80..f29a0097c8 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ b/installer/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -3,12 +3,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !appengine +// +build !appengine,!gopherjs package logrus -import "syscall" +import "golang.org/x/sys/unix" -const ioctlReadTermios = syscall.TCGETS +const ioctlReadTermios = unix.TCGETS -type Termios syscall.Termios +type Termios unix.Termios diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index 329038f6ca..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,22 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly -// +build !appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stderr - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index a3c6f6e7df..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris,!appengine - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/installer/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 3727e8adfb..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,!appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/installer/vendor/github.com/Sirupsen/logrus/text_formatter.go b/installer/vendor/github.com/Sirupsen/logrus/text_formatter.go index 9114b3ca47..61b21caea4 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/installer/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -3,9 +3,9 @@ package logrus import ( "bytes" "fmt" - "runtime" "sort" "strings" + "sync" "time" ) @@ -14,24 +14,19 @@ const ( red = 31 green = 32 yellow = 33 - blue = 34 + blue = 36 gray = 37 ) var ( baseTimestamp time.Time - isTerminal bool ) func init() { baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) } +// TextFormatter formats logs into text type TextFormatter struct { // Set to true to bypass checking for a TTY before outputting colors. ForceColors bool @@ -54,11 +49,26 @@ type TextFormatter struct { // that log extremely frequently and don't use the JSON formatter this may not // be desired. 
DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once } +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } +} + +// Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var b *bytes.Buffer - var keys []string = make([]string, 0, len(entry.Data)) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } @@ -74,12 +84,13 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry.Data) - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors timestampFormat := f.TimestampFormat if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat + timestampFormat = defaultTimestampFormat } if isColored { f.printColored(b, entry, keys, timestampFormat) @@ -115,8 +126,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelText := strings.ToUpper(entry.Level.String())[0:4] - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) } else { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) } @@ -127,12 +140,15 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } } -func needsQuoting(text string) bool { +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { return true } } @@ -140,29 +156,23 @@ func needsQuoting(text string) bool { } func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - + if b.Len() > 0 { + b.WriteByte(' ') + } b.WriteString(key) b.WriteByte('=') f.appendValue(b, value) - b.WriteByte(' ') } func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", errmsg) - } - default: - fmt.Fprint(b, value) + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) } } diff --git a/installer/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/installer/vendor/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f67b..0000000000 --- a/installer/vendor/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. 
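Reviewer note (not part of the patch): the logrus bump from v0.11.0 to v1.0.5 shown above changes the API surface the installer's vendored copy exposes — `Logger.SetLevel`/`Logger.AddHook`, atomic level reads (`Level` becomes `uint32`), `TextFormatter.QuoteEmptyFields`, `JSONFormatter.FieldMap`, and `Entry.WriterLevel`. A minimal sketch of that surface follows, assuming the vendored import path `github.com/Sirupsen/logrus`; the field name `"component"` and its value are illustrative only.

```go
package main

import (
	"log"

	"github.com/Sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// SetLevel stores the level atomically in v1.0.5 (Level is uint32),
	// so it can be called while other goroutines are logging.
	logger.SetLevel(logrus.DebugLevel)

	// QuoteEmptyFields is new on TextFormatter: empty values render as "".
	logger.Formatter = &logrus.TextFormatter{
		DisableColors:    true,
		QuoteEmptyFields: true,
	}

	// Alternatively, FieldMap lets the JSON formatter rename default keys,
	// as in the example from the json_formatter.go hunk above:
	// logger.Formatter = &logrus.JSONFormatter{
	// 	FieldMap: logrus.FieldMap{
	// 		logrus.FieldKeyTime:  "@timestamp",
	// 		logrus.FieldKeyLevel: "@level",
	// 		logrus.FieldKeyMsg:   "@message",
	// 	},
	// }

	// Entries can now hand out an *io.PipeWriter at a chosen level,
	// useful for redirecting the stdlib logger into logrus.
	w := logger.WithField("component", "installer").WriterLevel(logrus.WarnLevel)
	defer w.Close()
	log.SetOutput(w)

	logger.Info("vendored logrus updated")
}
```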
diff --git a/installer/vendor/github.com/Sirupsen/logrus/writer.go b/installer/vendor/github.com/Sirupsen/logrus/writer.go index f74d2aa5fc..7bdebedc60 100644 --- a/installer/vendor/github.com/Sirupsen/logrus/writer.go +++ b/installer/vendor/github.com/Sirupsen/logrus/writer.go @@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter { } func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + switch level { case DebugLevel: - printFunc = logger.Debug + printFunc = entry.Debug case InfoLevel: - printFunc = logger.Info + printFunc = entry.Info case WarnLevel: - printFunc = logger.Warn + printFunc = entry.Warn case ErrorLevel: - printFunc = logger.Error + printFunc = entry.Error case FatalLevel: - printFunc = logger.Fatal + printFunc = entry.Fatal case PanicLevel: - printFunc = logger.Panic + printFunc = entry.Panic default: - printFunc = logger.Print + printFunc = entry.Print } - go logger.writerScanner(reader, printFunc) + go entry.writerScanner(reader, printFunc) runtime.SetFinalizer(writer, writerFinalizer) return writer } -func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) for scanner.Scan() { printFunc(scanner.Text()) } if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) + entry.Errorf("Error while reading from Writer: %s", err) } reader.Close() } diff --git a/installer/vendor/github.com/armon/go-radix/.gitignore b/installer/vendor/github.com/armon/go-radix/.gitignore deleted file mode 100644 index 00268614f0..0000000000 --- a/installer/vendor/github.com/armon/go-radix/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/installer/vendor/github.com/armon/go-radix/.travis.yml b/installer/vendor/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6c7..0000000000 --- a/installer/vendor/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/installer/vendor/github.com/armon/go-radix/README.md b/installer/vendor/github.com/armon/go-radix/README.md deleted file mode 100644 index 26f42a2837..0000000000 --- a/installer/vendor/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,38 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. 
- * Minimum / Maximum value lookups - * Ordered iteration - -For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). - -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/installer/vendor/github.com/armon/go-radix/radix_test.go b/installer/vendor/github.com/armon/go-radix/radix_test.go deleted file mode 100644 index a7a4eab332..0000000000 --- a/installer/vendor/github.com/armon/go-radix/radix_test.go +++ /dev/null @@ -1,359 +0,0 @@ -package radix - -import ( - crand "crypto/rand" - "fmt" - "reflect" - "sort" - "testing" -) - -func TestRadix(t *testing.T) { - var min, max string - inp := make(map[string]interface{}) - for i := 0; i < 1000; i++ { - gen := generateUUID() - inp[gen] = i - if gen < min || i == 0 { - min = gen - } - if gen > max || i == 0 { - max = gen - } - } - - r := NewFromMap(inp) - if r.Len() != len(inp) { - t.Fatalf("bad length: %v %v", r.Len(), len(inp)) - } - - r.Walk(func(k string, v interface{}) bool { - println(k) - return false - }) - - for k, v := range inp { - out, ok := r.Get(k) - if !ok { - t.Fatalf("missing key: %v", k) - } - if out != v { - t.Fatalf("value mis-match: %v %v", out, v) - } - } - - // Check min and max - outMin, _, _ := r.Minimum() - if outMin != min { - t.Fatalf("bad minimum: %v %v", outMin, min) - } - outMax, _, _ := r.Maximum() - if outMax != max { - t.Fatalf("bad maximum: %v %v", outMax, max) - } - - for k, v := range inp { - out, ok := r.Delete(k) - if !ok { - t.Fatalf("missing key: %v", k) - } - if out != v { - t.Fatalf("value mis-match: %v %v", out, v) - } - } - if r.Len() != 0 { - t.Fatalf("bad length: %v", r.Len()) - } -} - -func TestRoot(t *testing.T) { - r := New() - _, ok := r.Delete("") - if ok { - t.Fatalf("bad") - } - _, ok = r.Insert("", true) - if ok { - t.Fatalf("bad") - } - val, ok := r.Get("") - if !ok || val != true { - t.Fatalf("bad: %v", val) - } - val, ok = r.Delete("") - if !ok || val != true { - t.Fatalf("bad: %v", val) - } -} - -func TestDelete(t *testing.T) { - - r := New() - - s := []string{"", "A", "AB"} - - for _, ss := range s { - r.Insert(ss, true) - } - - for _, ss := range s { - _, ok := r.Delete(ss) - if !ok { - t.Fatalf("bad %q", ss) - } - } -} - -func TestDeletePrefix(t *testing.T) { - type exp struct { - inp[] string - prefix string - out[] string - numDeleted int - } - - cases := []exp{ - {[]string{"", "A", "AB", "ABC", "R", "S"}, "A", []string{"", "R", "S"}, 3}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "ABC", []string{"", "A", "AB", "R", "S"}, 1}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "", []string{}, 6}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "S", []string{"", "A", "AB", "ABC", "R"}, 1}, - {[]string{"", "A", "AB", "ABC", "R", "S"}, "SS", []string{"", "A", "AB", "ABC", "R", "S"}, 0}, - } - - for _, test := range cases { - r := New() - for _, ss := range test.inp { - r.Insert(ss, true) - } - - deleted := r.DeletePrefix(test.prefix) - if deleted != test.numDeleted { - t.Fatalf("Bad delete, expected %v to be deleted but got %v", test.numDeleted, deleted) - } - - out := []string{} - fn := func(s string, v interface{}) bool { - out = append(out, s) - return false - } 
- r.Walk(fn) - - if !reflect.DeepEqual(out, test.out) { - t.Fatalf("mis-match: %v %v", out, test.out) - } - } -} - -func TestLongestPrefix(t *testing.T) { - r := New() - - keys := []string{ - "", - "foo", - "foobar", - "foobarbaz", - "foobarbazzip", - "foozip", - } - for _, k := range keys { - r.Insert(k, nil) - } - if r.Len() != len(keys) { - t.Fatalf("bad len: %v %v", r.Len(), len(keys)) - } - - type exp struct { - inp string - out string - } - cases := []exp{ - {"a", ""}, - {"abc", ""}, - {"fo", ""}, - {"foo", "foo"}, - {"foob", "foo"}, - {"foobar", "foobar"}, - {"foobarba", "foobar"}, - {"foobarbaz", "foobarbaz"}, - {"foobarbazzi", "foobarbaz"}, - {"foobarbazzip", "foobarbazzip"}, - {"foozi", "foo"}, - {"foozip", "foozip"}, - {"foozipzap", "foozip"}, - } - for _, test := range cases { - m, _, ok := r.LongestPrefix(test.inp) - if !ok { - t.Fatalf("no match: %v", test) - } - if m != test.out { - t.Fatalf("mis-match: %v %v", m, test) - } - } -} - -func TestWalkPrefix(t *testing.T) { - r := New() - - keys := []string{ - "foobar", - "foo/bar/baz", - "foo/baz/bar", - "foo/zip/zap", - "zipzap", - } - for _, k := range keys { - r.Insert(k, nil) - } - if r.Len() != len(keys) { - t.Fatalf("bad len: %v %v", r.Len(), len(keys)) - } - - type exp struct { - inp string - out []string - } - cases := []exp{ - { - "f", - []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, - }, - { - "foo", - []string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, - }, - { - "foob", - []string{"foobar"}, - }, - { - "foo/", - []string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"}, - }, - { - "foo/b", - []string{"foo/bar/baz", "foo/baz/bar"}, - }, - { - "foo/ba", - []string{"foo/bar/baz", "foo/baz/bar"}, - }, - { - "foo/bar", - []string{"foo/bar/baz"}, - }, - { - "foo/bar/baz", - []string{"foo/bar/baz"}, - }, - { - "foo/bar/bazoo", - []string{}, - }, - { - "z", - []string{"zipzap"}, - }, - } - - for _, test := range cases { - out := []string{} - fn := func(s string, v interface{}) bool { - out = append(out, s) - return false - } - r.WalkPrefix(test.inp, fn) - sort.Strings(out) - sort.Strings(test.out) - if !reflect.DeepEqual(out, test.out) { - t.Fatalf("mis-match: %v %v", out, test.out) - } - } -} - -func TestWalkPath(t *testing.T) { - r := New() - - keys := []string{ - "foo", - "foo/bar", - "foo/bar/baz", - "foo/baz/bar", - "foo/zip/zap", - "zipzap", - } - for _, k := range keys { - r.Insert(k, nil) - } - if r.Len() != len(keys) { - t.Fatalf("bad len: %v %v", r.Len(), len(keys)) - } - - type exp struct { - inp string - out []string - } - cases := []exp{ - { - "f", - []string{}, - }, - { - "foo", - []string{"foo"}, - }, - { - "foo/", - []string{"foo"}, - }, - { - "foo/ba", - []string{"foo"}, - }, - { - "foo/bar", - []string{"foo", "foo/bar"}, - }, - { - "foo/bar/baz", - []string{"foo", "foo/bar", "foo/bar/baz"}, - }, - { - "foo/bar/bazoo", - []string{"foo", "foo/bar", "foo/bar/baz"}, - }, - { - "z", - []string{}, - }, - } - - for _, test := range cases { - out := []string{} - fn := func(s string, v interface{}) bool { - out = append(out, s) - return false - } - r.WalkPath(test.inp, fn) - sort.Strings(out) - sort.Strings(test.out) - if !reflect.DeepEqual(out, test.out) { - t.Fatalf("mis-match: %v %v", out, test.out) - } - } -} - -// generateUUID is used to generate a random UUID -func generateUUID() string { - buf := make([]byte, 16) - if _, err := crand.Read(buf); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", - buf[0:4], 
- buf[4:6], - buf[6:8], - buf[8:10], - buf[10:16]) -} diff --git a/installer/vendor/github.com/boltdb/bolt/.gitignore b/installer/vendor/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7a5b..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/installer/vendor/github.com/boltdb/bolt/Makefile b/installer/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adc..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/installer/vendor/github.com/boltdb/bolt/README.md b/installer/vendor/github.com/boltdb/bolt/README.md deleted file mode 100644 index 7d43a15b2c..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,916 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently used in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. 
- -## Table of Contents - -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. 
- - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. 
You can create a bucket using the `DB.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. 
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - -Say you had a multi-tenant application where the root level bucket was the account bucket. 
Inside this bucket is a sequence of accounts, which are themselves buckets. Inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings. - -```go - -// createUser creates a new user in the given account. -func createUser(accountID int, u *User) error { - // Start the transaction. - tx, err := db.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - // Retrieve the root bucket for the account. - // Assume this has already been created when the account was set up. - root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10))) - - // Set up the users bucket. - bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) - if err != nil { - return err - } - - // Generate an ID for the new user. - userID, err := bkt.NextSequence() - if err != nil { - return err - } - u.ID = userID - - // Marshal and save the encoded user. - if buf, err := json.Marshal(u); err != nil { - return err - } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { - return err - } - - // Commit the transaction. - if err := tx.Commit(); err != nil { - return err - } - - return nil -} - -``` - - - - -### Database backups - -Bolt is a single file so it's easy to back up. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to back up over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can back up using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to back up to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample.
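For instance, a rough sketch of such an endpoint (the handler name, URL path, and the 10-second sample window are illustrative choices, not part of Bolt) that blocks for a fixed interval and returns the stats delta as JSON:

```go
func StatsHandleFunc(w http.ResponseWriter, req *http.Request) {
	// Snapshot the stats, wait a fixed sample window, then report the difference.
	prev := db.Stats()
	time.Sleep(10 * time.Second)
	stats := db.Stats()
	diff := stats.Sub(&prev)

	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(diff); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```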
- - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To do this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with an initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS require extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud; these snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ - path = getNoBackupFilesDir().getAbsolutePath(); -} else{ - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path); -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - //Some DB Logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL= [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if(!success){ - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together.
- -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application, however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/values pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions. - - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Bolt uses an exclusive write lock on the database file so it cannot be - shared by multiple processes. - -* Be careful when using `Bucket.FillPercent`. 
Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets cause poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bit systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk. Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. - -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where the key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair.
During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. -* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. 
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. -* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service -* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service. -* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners. -* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores. -* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB. -* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB. -* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings. -* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. -* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. -* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter. -* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development. -* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains -* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal. -* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. -* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. 
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies -* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB -* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework. - -If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/installer/vendor/github.com/boltdb/bolt/appveyor.yml b/installer/vendor/github.com/boltdb/bolt/appveyor.yml deleted file mode 100644 index 6e26e941d6..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/appveyor.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "{build}" - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\boltdb\bolt - -environment: - GOPATH: c:\gopath - -install: - - echo %PATH% - - echo %GOPATH% - - go version - - go env - - go get -v -t ./... - -build_script: - - go test -v ./... diff --git a/installer/vendor/github.com/boltdb/bolt/bucket_test.go b/installer/vendor/github.com/boltdb/bolt/bucket_test.go deleted file mode 100644 index cddbe27131..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/bucket_test.go +++ /dev/null @@ -1,1909 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "log" - "math/rand" - "os" - "strconv" - "strings" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a bucket that gets a non-existent key returns nil. -func TestBucket_Get_NonExistent(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can read a value that is not flushed yet. -func TestBucket_Get_FromNode(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket retrieved via Get() returns a nil. -func TestBucket_Get_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - - if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a slice returned from a bucket has a capacity equal to its length. -// This also allows slices to be appended to since it will require a realloc by Go. -// -// https://github.com/boltdb/bolt/issues/544 -func TestBucket_Get_Capacity(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Write key to a bucket. 
- if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("bucket")) - if err != nil { - return err - } - return b.Put([]byte("key"), []byte("val")) - }); err != nil { - t.Fatal(err) - } - - // Retrieve value and attempt to append to it. - if err := db.Update(func(tx *bolt.Tx) error { - k, v := tx.Bucket([]byte("bucket")).Cursor().First() - - // Verify capacity. - if len(k) != cap(k) { - t.Fatalf("unexpected key slice capacity: %d", cap(k)) - } else if len(v) != cap(v) { - t.Fatalf("unexpected value slice capacity: %d", cap(v)) - } - - // Ensure slice can be appended to without a segfault. - k = append(k, []byte("123")...) - v = append(v, []byte("123")...) - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write a key/value. -func TestBucket_Put(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("bar"), v) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can rewrite a key in the same transaction. -func TestBucket_Put_Repeat(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("baz")); err != nil { - t.Fatal(err) - } - - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if !bytes.Equal([]byte("baz"), value) { - t.Fatalf("unexpected value: %v", value) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write a bunch of large values. -func TestBucket_Put_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - count, factor := 100, 200 - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 1; i < count; i++ { - if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i < count; i++ { - value := b.Get([]byte(strings.Repeat("0", i*factor))) - if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) { - t.Fatalf("unexpected value: %v", value) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a database can perform multiple large appends safely. 
-func TestDB_Put_VeryLarge(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - n, batchN := 400000, 200000 - ksize, vsize := 8, 500 - - db := MustOpenDB() - defer db.MustClose() - - for i := 0; i < n; i += batchN { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for j := 0; j < batchN; j++ { - k, v := make([]byte, ksize), make([]byte, vsize) - binary.BigEndian.PutUint32(k, uint32(i+j)) - if err := b.Put(k, v); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - } -} - -// Ensure that a setting a value on a key with a bucket value returns an error. -func TestBucket_Put_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b0, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a setting a value while the transaction is closed returns an error. -func TestBucket_Put_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that setting a value on a read-only bucket returns an error. -func TestBucket_Put_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can delete an existing key. -func TestBucket_Delete(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a large set of keys will work correctly. 
-func TestBucket_Delete_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - if err := b.Delete([]byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 100; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); v != nil { - t.Fatalf("unexpected value: %v, i=%d", v, i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Deleting a very large list of keys will cause the freelist to use overflow. -func TestBucket_Delete_FreelistOverflow(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := MustOpenDB() - defer db.MustClose() - - k := make([]byte, 16) - for i := uint64(0); i < 10000; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("0")) - if err != nil { - t.Fatalf("bucket error: %s", err) - } - - for j := uint64(0); j < 1000; j++ { - binary.BigEndian.PutUint64(k[:8], i) - binary.BigEndian.PutUint64(k[8:], j) - if err := b.Put(k, nil); err != nil { - t.Fatalf("put error: %s", err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - } - - // Delete all of them in one large transaction - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("0")) - c := b.Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that accessing and updating nested buckets is ok across transactions. -func TestBucket_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create a widgets bucket. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - // Create a widgets/foo bucket. - _, err = b.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - // Create a widgets/bar key. - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Update widgets/bar. - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Cause a split. - if err := db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - for i := 0; i < 10000; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Insert into widgets/foo/baz. - if err := db.Update(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // Verify. 
- if err := db.View(func(tx *bolt.Tx) error { - var b = tx.Bucket([]byte("widgets")) - if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) { - t.Fatalf("unexpected value: %v", v) - } - if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) { - t.Fatalf("unexpected value: %v", v) - } - for i := 0; i < 10000; i++ { - if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) { - t.Fatalf("unexpected value: %v", v) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket using Delete() returns an error. -func TestBucket_Delete_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a key on a read-only bucket returns an error. -func TestBucket_Delete_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a deleting value while the transaction is closed returns an error. -func TestBucket_Delete_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that deleting a bucket causes nested buckets to be deleted. -func TestBucket_DeleteBucket_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed. 
-func TestBucket_DeleteBucket_Nested2(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - bar, err := foo.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - if err := bar.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - widgets := tx.Bucket([]byte("widgets")) - if widgets == nil { - t.Fatal("expected widgets bucket") - } - - foo := widgets.Bucket([]byte("foo")) - if foo == nil { - t.Fatal("expected foo bucket") - } - - bar := foo.Bucket([]byte("bar")) - if bar == nil { - t.Fatal("expected bar bucket") - } - - if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("expected bucket to be deleted") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a child bucket with multiple pages causes all pages to get collected. -// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly. -func TestBucket_DeleteBucket_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - foo, err := widgets.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a simple value retrieved via Bucket() returns a nil. -func TestBucket_Bucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil { - t.Fatal("expected nil bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket on an existing non-bucket key returns an error. -func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket on an existing non-bucket key returns an error. 
-func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure bucket can set and update its sequence number. -func TestBucket_Sequence(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - bkt, err := tx.CreateBucket([]byte("0")) - if err != nil { - t.Fatal(err) - } - - // Retrieve sequence. - if v := bkt.Sequence(); v != 0 { - t.Fatalf("unexpected sequence: %d", v) - } - - // Update sequence. - if err := bkt.SetSequence(1000); err != nil { - t.Fatal(err) - } - - // Read sequence again. - if v := bkt.Sequence(); v != 1000 { - t.Fatalf("unexpected sequence: %d", v) - } - - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify sequence in separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 { - t.Fatalf("unexpected sequence: %d", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can return an autoincrementing sequence. -func TestBucket_NextSequence(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - widgets, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - woojits, err := tx.CreateBucket([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - - // Make sure sequence increments. - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpecte sequence: %d", seq) - } - - if seq, err := widgets.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - - // Buckets should be separate. - if seq, err := woojits.NextSequence(); err != nil { - t.Fatal(err) - } else if seq != 1 { - t.Fatalf("unexpected sequence: %d", 1) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket will persist an autoincrementing sequence even if its -// the only thing updated on the bucket. -// https://github.com/boltdb/bolt/issues/296 -func TestBucket_NextSequence_Persist(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - seq, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != nil { - t.Fatalf("unexpected error: %s", err) - } else if seq != 2 { - t.Fatalf("unexpected sequence: %d", seq) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that retrieving the next sequence on a read-only bucket returns an error. 
-func TestBucket_NextSequence_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - _, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that retrieving the next sequence for a bucket on a closed database return an error. -func TestBucket_NextSequence_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if _, err := b.NextSequence(); err != bolt.ErrTxClosed { - t.Fatal(err) - } -} - -// Ensure a user can loop over all key/value pairs in a bucket. -func TestBucket_ForEach(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0001")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0002")); err != nil { - t.Fatal(err) - } - - var index int - if err := b.ForEach(func(k, v []byte) error { - switch index { - case 0: - if !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - case 1: - if !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0001")) { - t.Fatalf("unexpected value: %v", v) - } - case 2: - if !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0000")) { - t.Fatalf("unexpected value: %v", v) - } - } - index++ - return nil - }); err != nil { - t.Fatal(err) - } - - if index != 3 { - t.Fatalf("unexpected index: %d", index) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a database can stop iteration early. -func TestBucket_ForEach_ShortCircuit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0000")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0000")); err != nil { - t.Fatal(err) - } - - var index int - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - index++ - if bytes.Equal(k, []byte("baz")) { - return errors.New("marker") - } - return nil - }); err == nil || err.Error() != "marker" { - t.Fatalf("unexpected error: %s", err) - } - if index != 2 { - t.Fatalf("unexpected index: %d", index) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that looping over a bucket on a closed database returns an error. 
-func TestBucket_ForEach_Closed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that an error is returned when inserting with an empty key. -func TestBucket_Put_EmptyKey(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { - t.Fatalf("unexpected error: %s", err) - } - if err := b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when inserting with a key that's too large. -func TestBucket_Put_KeyTooLarge(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when inserting a value that's too large. -func TestBucket_Put_ValueTooLarge(t *testing.T) { - // Skip this test on DroneCI because the machine is resource constrained. - if os.Getenv("DRONE") == "true" { - t.Skip("not enough RAM for test") - } - - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Add bucket with fewer keys but one big value. 
- bigKey := []byte("really-big-value") - for i := 0; i < 500; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("woojits")).Stats() - if stats.BranchPageN != 1 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 7 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 2 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 501 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 2 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } - - branchInuse := 16 // branch page header - branchInuse += 7 * 16 // branch elements - branchInuse += 7 * 3 // branch keys (6 3-byte keys) - if stats.BranchInuse != branchInuse { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } - - leafInuse := 7 * 16 // leaf page header - leafInuse += 501 * 16 // leaf elements - leafInuse += 500*3 + len(bigKey) // leaf keys - leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values - if stats.LeafInuse != leafInuse { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - // Only check allocations for 4KB pages. - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 4096 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 36864 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 0 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 0 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket with random insertion utilizes fill percentage correctly. -func TestBucket_Stats_RandomFill(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } else if os.Getpagesize() != 4096 { - t.Skip("invalid page size for test") - } - - db := MustOpenDB() - defer db.MustClose() - - // Add a set of values in random order. It will be the same random - // order so we can maintain consistency between test runs. 
- var count int - rand := rand.New(rand.NewSource(42)) - for _, i := range rand.Perm(1000) { - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("woojits")) - if err != nil { - t.Fatal(err) - } - b.FillPercent = 0.9 - for _, j := range rand.Perm(100) { - index := (j * 10000) + i - if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil { - t.Fatal(err) - } - count++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("woojits")).Stats() - if stats.KeyN != 100000 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } - - if stats.BranchPageN != 98 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.BranchInuse != 130984 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.BranchAlloc != 401408 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } - - if stats.LeafPageN != 3412 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.LeafInuse != 4742482 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } else if stats.LeafAlloc != 13975552 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. -func TestBucket_Stats_Small(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. 
- b, err := tx.CreateBucket([]byte("whozawhats")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16+16+6 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestBucket_Stats_EmptyBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - // Add a bucket that fits on a single root leaf. - if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("whozawhats")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 0 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 0 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 1 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 0 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 0 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 16 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a bucket can calculate stats. 
-func TestBucket_Stats_Nested(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil { - t.Fatal(err) - } - } - - bar, err := b.CreateBucket([]byte("bar")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 10; i++ { - if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - - baz, err := bar.CreateBucket([]byte("baz")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 10; i++ { - if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("foo")) - stats := b.Stats() - if stats.BranchPageN != 0 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 2 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 122 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 3 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 0 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } - - foo := 16 // foo (pghdr) - foo += 101 * 16 // foo leaf elements - foo += 100*2 + 100*2 // foo leaf key/values - foo += 3 + 16 // foo -> bar key/value - - bar := 16 // bar (pghdr) - bar += 11 * 16 // bar leaf elements - bar += 10 + 10 // bar leaf key/values - bar += 3 + 16 // bar -> baz key/value - - baz := 16 // baz (inline) (pghdr) - baz += 10 * 16 // baz leaf elements - baz += 10 + 10 // baz leaf key/values - - if stats.LeafInuse != foo+bar+baz { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 0 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 8192 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 3 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 1 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != baz { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a large bucket can calculate stats. -func TestBucket_Stats_Large(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - db := MustOpenDB() - defer db.MustClose() - - var index int - for i := 0; i < 100; i++ { - // Add bucket with lots of keys. 
- if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < 1000; i++ { - if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil { - t.Fatal(err) - } - index++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - db.MustCheck() - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("widgets")).Stats() - if stats.BranchPageN != 13 { - t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN) - } else if stats.BranchOverflowN != 0 { - t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN) - } else if stats.LeafPageN != 1196 { - t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN) - } else if stats.LeafOverflowN != 0 { - t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN) - } else if stats.KeyN != 100000 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } else if stats.Depth != 3 { - t.Fatalf("unexpected Depth: %d", stats.Depth) - } else if stats.BranchInuse != 25257 { - t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse) - } else if stats.LeafInuse != 2596916 { - t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse) - } - - if os.Getpagesize() == 4096 { - if stats.BranchAlloc != 53248 { - t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc) - } else if stats.LeafAlloc != 4898816 { - t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc) - } - } - - if stats.BucketN != 1 { - t.Fatalf("unexpected BucketN: %d", stats.BucketN) - } else if stats.InlineBucketN != 0 { - t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN) - } else if stats.InlineBucketInuse != 0 { - t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can write random keys and values across multiple transactions. -func TestBucket_Put_Single(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - index := 0 - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - m := make(map[string][]byte) - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil { - panic("put error: " + err.Error()) - } - m[string(item.Key)] = item.Value - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify all key/values so far. - if err := db.View(func(tx *bolt.Tx) error { - i := 0 - for k, v := range m { - value := tx.Bucket([]byte("widgets")).Get([]byte(k)) - if !bytes.Equal(value, v) { - t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v) - db.CopyTempFile() - t.FailNow() - } - i++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - index++ - return true - }, nil); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can insert multiple key/value pairs at once. -func TestBucket_Put_Multiple(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. 
- if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify all items exist. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - value := b.Get(item.Key) - if !bytes.Equal(item.Value, value) { - db.CopyTempFile() - t.Fatalf("exp=%x; got=%x", item.Value, value) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - return true - }, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can delete all key/value pairs and return to a single leaf page. -func TestBucket_Delete_Quick(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - if err := quick.Check(func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Remove items one at a time and check consistency. - for _, item := range items { - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete(item.Key) - }); err != nil { - t.Fatal(err) - } - } - - // Anything before our deletion index should be nil. - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error { - t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3)) - return nil - }); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - return true - }, qconfig()); err != nil { - t.Error(err) - } -} - -func ExampleBucket_Put() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a bucket. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - - // Set the value "bar" for the key "foo". - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Read value back in a different read-only transaction. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleBucket_Delete() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a bucket. 
- b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - - // Set the value "bar" for the key "foo". - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - - // Retrieve the key back from the database and verify it. - value := b.Get([]byte("foo")) - fmt.Printf("The value of 'foo' was: %s\n", value) - - return nil - }); err != nil { - log.Fatal(err) - } - - // Delete the key in a different write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) - }); err != nil { - log.Fatal(err) - } - - // Retrieve the key again. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - if value == nil { - fmt.Printf("The value of 'foo' is now: nil\n") - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' was: bar - // The value of 'foo' is now: nil -} - -func ExampleBucket_ForEach() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Insert data into a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - return err - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - return err - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - return err - } - - // Iterate over items in sorted key order. - if err := b.ForEach(func(k, v []byte) error { - fmt.Printf("A %s is %s.\n", k, v) - return nil - }); err != nil { - return err - } - - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} diff --git a/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main.go b/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main.go deleted file mode 100644 index 057eca50a2..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main.go +++ /dev/null @@ -1,1740 +0,0 @@ -package main - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" - "unsafe" - - "github.com/boltdb/bolt" -) - -var ( - // ErrUsage is returned when a usage message was printed and the process - // should simply exit with an error. - ErrUsage = errors.New("usage") - - // ErrUnknownCommand is returned when a CLI command is not specified. - ErrUnknownCommand = errors.New("unknown command") - - // ErrPathRequired is returned when the path to a Bolt database is not specified. - ErrPathRequired = errors.New("path required") - - // ErrFileNotFound is returned when a Bolt database does not exist. - ErrFileNotFound = errors.New("file not found") - - // ErrInvalidValue is returned when a benchmark reads an unexpected value. - ErrInvalidValue = errors.New("invalid value") - - // ErrCorrupt is returned when a checking a data file finds errors. - ErrCorrupt = errors.New("invalid value") - - // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. 
- ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") - - // ErrPageIDRequired is returned when a required page id is not specified. - ErrPageIDRequired = errors.New("page id required") - - // ErrPageNotFound is returned when specifying a page above the high water mark. - ErrPageNotFound = errors.New("page not found") - - // ErrPageFreed is returned when reading a page that has already been freed. - ErrPageFreed = errors.New("page freed") -) - -// PageHeaderSize represents the size of the bolt.page header. -const PageHeaderSize = 16 - -func main() { - m := NewMain() - if err := m.Run(os.Args[1:]...); err == ErrUsage { - os.Exit(2) - } else if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -// Main represents the main program execution. -type Main struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewMain returns a new instance of Main connect to the standard input/output. -func NewMain() *Main { - return &Main{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - } -} - -// Run executes the program. -func (m *Main) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - case "bench": - return newBenchCommand(m).Run(args[1:]...) - case "check": - return newCheckCommand(m).Run(args[1:]...) - case "compact": - return newCompactCommand(m).Run(args[1:]...) - case "dump": - return newDumpCommand(m).Run(args[1:]...) - case "info": - return newInfoCommand(m).Run(args[1:]...) - case "page": - return newPageCommand(m).Run(args[1:]...) - case "pages": - return newPagesCommand(m).Run(args[1:]...) - case "stats": - return newStatsCommand(m).Run(args[1:]...) - default: - return ErrUnknownCommand - } -} - -// Usage returns the help message. -func (m *Main) Usage() string { - return strings.TrimLeft(` -Bolt is a tool for inspecting bolt databases. - -Usage: - - bolt command [arguments] - -The commands are: - - bench run synthetic benchmark against bolt - check verifies integrity of bolt database - compact copies a bolt database, compacting it in the process - info print basic info - help print this screen - pages print list of pages with their types - stats iterate over all pages and generate usage stats - -Use "bolt [command] -h" for more information about a command. -`, "\n") -} - -// CheckCommand represents the "check" command execution. -type CheckCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewCheckCommand returns a CheckCommand. -func newCheckCommand(m *Main) *CheckCommand { - return &CheckCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *CheckCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Perform consistency check. 
- return db.View(func(tx *bolt.Tx) error { - var count int - ch := tx.Check() - loop: - for { - select { - case err, ok := <-ch: - if !ok { - break loop - } - fmt.Fprintln(cmd.Stdout, err) - count++ - } - } - - // Print summary of errors. - if count > 0 { - fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) - return ErrCorrupt - } - - // Notify user that database is valid. - fmt.Fprintln(cmd.Stdout, "OK") - return nil - }) -} - -// Usage returns the help message. -func (cmd *CheckCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt check PATH - -Check opens a database at PATH and runs an exhaustive check to verify that -all pages are accessible or are marked as freed. It also verifies that no -pages are double referenced. - -Verification errors will stream out as they are found and the process will -return after all pages have been checked. -`, "\n") -} - -// InfoCommand represents the "info" command execution. -type InfoCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewInfoCommand returns a InfoCommand. -func newInfoCommand(m *Main) *InfoCommand { - return &InfoCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *InfoCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open the database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - // Print basic database info. - info := db.Info() - fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) - - return nil -} - -// Usage returns the help message. -func (cmd *InfoCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt info PATH - -Info prints basic information about the Bolt database at PATH. -`, "\n") -} - -// DumpCommand represents the "dump" command execution. -type DumpCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newDumpCommand returns a DumpCommand. -func newDumpCommand(m *Main) *DumpCommand { - return &DumpCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *DumpCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database to retrieve page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. 
- if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Print page to stdout. - if err := cmd.PrintPage(cmd.Stdout, f, pageID, pageSize); err != nil { - return err - } - } - - return nil -} - -// PrintPage prints a given page as hexadecimal. -func (cmd *DumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *DumpCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt dump -page PAGEID PATH - -Dump prints a hexadecimal dump of a single page. -`, "\n") -} - -// PageCommand represents the "page" command execution. -type PageCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// newPageCommand returns a PageCommand. -func newPageCommand(m *Main) *PageCommand { - return &PageCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := atois(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Retrieve page info and page size. - p, buf, err := ReadPage(path, pageID) - if err != nil { - return err - } - - // Print basic page info. - fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.id) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) - fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) - - // Print type-specific data. 
- switch p.Type() {
- case "meta":
- err = cmd.PrintMeta(cmd.Stdout, buf)
- case "leaf":
- err = cmd.PrintLeaf(cmd.Stdout, buf)
- case "branch":
- err = cmd.PrintBranch(cmd.Stdout, buf)
- case "freelist":
- err = cmd.PrintFreelist(cmd.Stdout, buf)
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// PrintMeta prints the data from the meta page.
-func (cmd *PageCommand) PrintMeta(w io.Writer, buf []byte) error {
- m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize]))
- fmt.Fprintf(w, "Version: %d\n", m.version)
- fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
- fmt.Fprintf(w, "Flags: %08x\n", m.flags)
- fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
- fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
- fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
- fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
- fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
- fmt.Fprintf(w, "\n")
- return nil
-}
-
-// PrintLeaf prints the data for a leaf page.
-func (cmd *PageCommand) PrintLeaf(w io.Writer, buf []byte) error {
- p := (*page)(unsafe.Pointer(&buf[0]))
-
- // Print number of items.
- fmt.Fprintf(w, "Item Count: %d\n", p.count)
- fmt.Fprintf(w, "\n")
-
- // Print each key/value.
- for i := uint16(0); i < p.count; i++ {
- e := p.leafPageElement(i)
-
- // Format key as string.
- var k string
- if isPrintable(string(e.key())) {
- k = fmt.Sprintf("%q", string(e.key()))
- } else {
- k = fmt.Sprintf("%x", string(e.key()))
- }
-
- // Format value as string.
- var v string
- if (e.flags & uint32(bucketLeafFlag)) != 0 {
- b := (*bucket)(unsafe.Pointer(&e.value()[0]))
- v = fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
- } else if isPrintable(string(e.value())) {
- v = fmt.Sprintf("%q", string(e.value()))
- } else {
- v = fmt.Sprintf("%x", string(e.value()))
- }
-
- fmt.Fprintf(w, "%s: %s\n", k, v)
- }
- fmt.Fprintf(w, "\n")
- return nil
-}
-
-// PrintBranch prints the data for a leaf page.
-func (cmd *PageCommand) PrintBranch(w io.Writer, buf []byte) error {
- p := (*page)(unsafe.Pointer(&buf[0]))
-
- // Print number of items.
- fmt.Fprintf(w, "Item Count: %d\n", p.count)
- fmt.Fprintf(w, "\n")
-
- // Print each key/value.
- for i := uint16(0); i < p.count; i++ {
- e := p.branchPageElement(i)
-
- // Format key as string.
- var k string
- if isPrintable(string(e.key())) {
- k = fmt.Sprintf("%q", string(e.key()))
- } else {
- k = fmt.Sprintf("%x", string(e.key()))
- }
-
- fmt.Fprintf(w, "%s: <pgid=%d>\n", k, e.pgid)
- }
- fmt.Fprintf(w, "\n")
- return nil
-}
-
-// PrintFreelist prints the data for a freelist page.
-func (cmd *PageCommand) PrintFreelist(w io.Writer, buf []byte) error {
- p := (*page)(unsafe.Pointer(&buf[0]))
-
- // Print number of items.
- fmt.Fprintf(w, "Item Count: %d\n", p.count)
- fmt.Fprintf(w, "\n")
-
- // Print each page in the freelist.
- ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))
- for i := uint16(0); i < p.count; i++ {
- fmt.Fprintf(w, "%d\n", ids[i])
- }
- fmt.Fprintf(w, "\n")
- return nil
-}
-
-// PrintPage prints a given page as hexadecimal.
-func (cmd *PageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error {
- const bytesPerLineN = 16
-
- // Read page into buffer.
- buf := make([]byte, pageSize)
- addr := pageID * pageSize
- if n, err := r.ReadAt(buf, int64(addr)); err != nil {
- return err
- } else if n != pageSize {
- return io.ErrUnexpectedEOF
- }
-
- // Write out to writer in 16-byte lines.
- var prev []byte
- var skipped bool
- for offset := 0; offset < pageSize; offset += bytesPerLineN {
- // Retrieve current 16-byte line.
- line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *PageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page -page PATH pageid [pageid...] - -Page prints one or more pages in human readable format. -`, "\n") -} - -// PagesCommand represents the "pages" command execution. -type PagesCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewPagesCommand returns a PagesCommand. -func newPagesCommand(m *Main) *PagesCommand { - return &PagesCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *PagesCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer func() { _ = db.Close() }() - - // Write header. - fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") - fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") - - return db.Update(func(tx *bolt.Tx) error { - var id int - for { - p, err := tx.Page(id) - if err != nil { - return &PageError{ID: id, Err: err} - } else if p == nil { - break - } - - // Only display count and overflow if this is a non-free page. - var count, overflow string - if p.Type != "free" { - count = strconv.Itoa(p.Count) - if p.OverflowCount > 0 { - overflow = strconv.Itoa(p.OverflowCount) - } - } - - // Print table row. - fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) - - // Move to the next non-overflow page. - id += 1 - if p.Type != "free" { - id += p.OverflowCount - } - } - return nil - }) -} - -// Usage returns the help message. -func (cmd *PagesCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt pages PATH - -Pages prints a table of pages with their type (meta, leaf, branch, freelist). -Leaf and branch pages will show a key count in the "items" column while the -freelist will show the number of free pages in the "items" column. - -The "overflow" column shows the number of blocks that the page spills over -into. Normally there is no overflow but large keys and values can cause -a single page to take up multiple blocks. -`, "\n") -} - -// StatsCommand represents the "stats" command execution. -type StatsCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewStatsCommand returns a StatsCommand. -func newStatsCommand(m *Main) *StatsCommand { - return &StatsCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. 
-func (cmd *StatsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path, prefix := fs.Arg(0), fs.Arg(1) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return err - } - defer db.Close() - - return db.View(func(tx *bolt.Tx) error { - var s bolt.BucketStats - var count int - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - if bytes.HasPrefix(name, []byte(prefix)) { - s.Add(b.Stats()) - count += 1 - } - return nil - }); err != nil { - return err - } - - fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) - - fmt.Fprintln(cmd.Stdout, "Page count statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) - fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) - - fmt.Fprintln(cmd.Stdout, "Tree statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) - fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) - - fmt.Fprintln(cmd.Stdout, "Page size utilization") - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) - var percentage int - if s.BranchAlloc != 0 { - percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) - percentage = 0 - if s.LeafAlloc != 0 { - percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) - - fmt.Fprintln(cmd.Stdout, "Bucket statistics") - fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = 0 - if s.BucketN != 0 { - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) - } - fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) - percentage = 0 - if s.LeafInuse != 0 { - percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) - - return nil - }) -} - -// Usage returns the help message. -func (cmd *StatsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt stats PATH - -Stats performs an extensive search of the database to track every page -reference. It starts at the current meta page and recursively iterates -through every accessible bucket. - -The following errors can be reported: - - already freed - The page is referenced more than once in the freelist. - - unreachable unfreed - The page is not referenced by a bucket or in the freelist. - - reachable freed - The page is referenced by a bucket but is also in the freelist. - - out of bounds - A page is referenced that is above the high water mark. 
- - multiple references - A page is referenced by more than one other page. - - invalid type - The page type is not "meta", "leaf", "branch", or "freelist". - -No errors should occur in your database. However, if for some reason you -experience corruption, please submit a ticket to the Bolt project page: - - https://github.com/boltdb/bolt/issues -`, "\n") -} - -var benchBucketName = []byte("bench") - -// BenchCommand represents the "bench" command execution. -type BenchCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// NewBenchCommand returns a BenchCommand using the -func newBenchCommand(m *Main) *BenchCommand { - return &BenchCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the "bench" command. -func (cmd *BenchCommand) Run(args ...string) error { - // Parse CLI arguments. - options, err := cmd.ParseFlags(args) - if err != nil { - return err - } - - // Remove path if "-work" is not set. Otherwise keep path. - if options.Work { - fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) - } else { - defer os.Remove(options.Path) - } - - // Create database. - db, err := bolt.Open(options.Path, 0666, nil) - if err != nil { - return err - } - db.NoSync = options.NoSync - defer db.Close() - - // Write to the database. - var results BenchResults - if err := cmd.runWrites(db, options, &results); err != nil { - return fmt.Errorf("write: %v", err) - } - - // Read from the database. - if err := cmd.runReads(db, options, &results); err != nil { - return fmt.Errorf("bench: read: %s", err) - } - - // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) - fmt.Fprintln(os.Stderr, "") - return nil -} - -// ParseFlags parses the command line flags. -func (cmd *BenchCommand) ParseFlags(args []string) (*BenchOptions, error) { - var options BenchOptions - - // Parse flagset. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") - fs.StringVar(&options.WriteMode, "write-mode", "seq", "") - fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.IntVar(&options.Iterations, "count", 1000, "") - fs.IntVar(&options.BatchSize, "batch-size", 0, "") - fs.IntVar(&options.KeySize, "key-size", 8, "") - fs.IntVar(&options.ValueSize, "value-size", 32, "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.StringVar(&options.BlockProfile, "blockprofile", "", "") - fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") - fs.BoolVar(&options.NoSync, "no-sync", false, "") - fs.BoolVar(&options.Work, "work", false, "") - fs.StringVar(&options.Path, "path", "", "") - fs.SetOutput(cmd.Stderr) - if err := fs.Parse(args); err != nil { - return nil, err - } - - // Set batch size to iteration size if not set. - // Require that batch size can be evenly divided by the iteration count. - if options.BatchSize == 0 { - options.BatchSize = options.Iterations - } else if options.Iterations%options.BatchSize != 0 { - return nil, ErrNonDivisibleBatchSize - } - - // Generate temp path if one is not passed in. 
- if options.Path == "" { - f, err := ioutil.TempFile("", "bolt-bench-") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - f.Close() - os.Remove(f.Name()) - options.Path = f.Name() - } - - return &options, nil -} - -// Writes to the database. -func (cmd *BenchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for writes. - if options.ProfileMode == "rw" || options.ProfileMode == "w" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.WriteMode { - case "seq": - err = cmd.runWritesSequential(db, options, results) - case "rnd": - err = cmd.runWritesRandom(db, options, results) - case "seq-nest": - err = cmd.runWritesSequentialNested(db, options, results) - case "rnd-nest": - err = cmd.runWritesRandomNested(db, options, results) - default: - return fmt.Errorf("invalid write mode: %s", options.WriteMode) - } - - // Save time to write. - results.WriteDuration = time.Since(t) - - // Stop profiling for writes only. - if options.ProfileMode == "w" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *BenchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *BenchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert key/value. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -func (cmd *BenchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - top, err := tx.CreateBucketIfNotExists(benchBucketName) - if err != nil { - return err - } - top.FillPercent = options.FillPercent - - // Create bucket key. - name := make([]byte, options.KeySize) - binary.BigEndian.PutUint32(name, keySource()) - - // Create bucket. 
- b, err := top.CreateBucketIfNotExists(name) - if err != nil { - return err - } - b.FillPercent = options.FillPercent - - for j := 0; j < options.BatchSize; j++ { - var key = make([]byte, options.KeySize) - var value = make([]byte, options.ValueSize) - - // Generate key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert value into subbucket. - if err := b.Put(key, value); err != nil { - return err - } - } - - return nil - }); err != nil { - return err - } - } - return nil -} - -// Reads from the database. -func (cmd *BenchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - // Start profiling for reads. - if options.ProfileMode == "r" { - cmd.startProfiling(options) - } - - t := time.Now() - - var err error - switch options.ReadMode { - case "seq": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsSequentialNested(db, options, results) - default: - err = cmd.runReadsSequential(db, options, results) - } - default: - return fmt.Errorf("invalid read mode: %s", options.ReadMode) - } - - // Save read time. - results.ReadDuration = time.Since(t) - - // Stop profiling for reads. - if options.ProfileMode == "rw" || options.ProfileMode == "r" { - cmd.stopProfiling() - } - - return err -} - -func (cmd *BenchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return errors.New("invalid value") - } - count++ - } - - if options.WriteMode == "seq" && count != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *BenchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - var count int - var top = tx.Bucket(benchBucketName) - if err := top.ForEach(func(name, _ []byte) error { - c := top.Bucket(name).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - if v == nil { - return ErrInvalidValue - } - count++ - } - return nil - }); err != nil { - return err - } - - if options.WriteMode == "seq-nest" && count != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) - } - - results.ReadOps += count - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -// File handlers for the various profiles. -var cpuprofile, memprofile, blockprofile *os.File - -// Starts all profiles set on the options. -func (cmd *BenchCommand) startProfiling(options *BenchOptions) { - var err error - - // Start CPU profiling. - if options.CPUProfile != "" { - cpuprofile, err = os.Create(options.CPUProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - pprof.StartCPUProfile(cpuprofile) - } - - // Start memory profiling. 
- if options.MemProfile != "" { - memprofile, err = os.Create(options.MemProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) - os.Exit(1) - } - runtime.MemProfileRate = 4096 - } - - // Start fatal profiling. - if options.BlockProfile != "" { - blockprofile, err = os.Create(options.BlockProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) - os.Exit(1) - } - runtime.SetBlockProfileRate(1) - } -} - -// Stops all profiles. -func (cmd *BenchCommand) stopProfiling() { - if cpuprofile != nil { - pprof.StopCPUProfile() - cpuprofile.Close() - cpuprofile = nil - } - - if memprofile != nil { - pprof.Lookup("heap").WriteTo(memprofile, 0) - memprofile.Close() - memprofile = nil - } - - if blockprofile != nil { - pprof.Lookup("block").WriteTo(blockprofile, 0) - blockprofile.Close() - blockprofile = nil - runtime.SetBlockProfileRate(0) - } -} - -// BenchOptions represents the set of options that can be passed to "bolt bench". -type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int - BatchSize int - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string -} - -// BenchResults represents the performance results of the benchmark. -type BenchResults struct { - WriteOps int - WriteDuration time.Duration - ReadOps int - ReadDuration time.Duration -} - -// Returns the duration for a single write operation. -func (r *BenchResults) WriteOpDuration() time.Duration { - if r.WriteOps == 0 { - return 0 - } - return r.WriteDuration / time.Duration(r.WriteOps) -} - -// Returns average number of write operations that can be performed per second. -func (r *BenchResults) WriteOpsPerSecond() int { - var op = r.WriteOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -// Returns the duration for a single read operation. -func (r *BenchResults) ReadOpDuration() time.Duration { - if r.ReadOps == 0 { - return 0 - } - return r.ReadDuration / time.Duration(r.ReadOps) -} - -// Returns average number of read operations that can be performed per second. -func (r *BenchResults) ReadOpsPerSecond() int { - var op = r.ReadOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - -type PageError struct { - ID int - Err error -} - -func (e *PageError) Error() string { - return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) -} - -// isPrintable returns true if the string is valid unicode and contains only printable runes. -func isPrintable(s string) bool { - if !utf8.ValidString(s) { - return false - } - for _, ch := range s { - if !unicode.IsPrint(ch) { - return false - } - } - return true -} - -// ReadPage reads page info & full page data from a path. -// This is not transactionally safe. -func ReadPage(path string, pageID int) (*page, []byte, error) { - // Find page size. - pageSize, err := ReadPageSize(path) - if err != nil { - return nil, nil, fmt.Errorf("read page size: %s", err) - } - - // Open database file. - f, err := os.Open(path) - if err != nil { - return nil, nil, err - } - defer f.Close() - - // Read one block into buffer. 
- buf := make([]byte, pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - - // Determine total number of blocks. - p := (*page)(unsafe.Pointer(&buf[0])) - overflowN := p.overflow - - // Re-read entire page (with overflow) into buffer. - buf = make([]byte, (int(overflowN)+1)*pageSize) - if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { - return nil, nil, err - } else if n != len(buf) { - return nil, nil, io.ErrUnexpectedEOF - } - p = (*page)(unsafe.Pointer(&buf[0])) - - return p, buf, nil -} - -// ReadPageSize reads page size a path. -// This is not transactionally safe. -func ReadPageSize(path string) (int, error) { - // Open database file. - f, err := os.Open(path) - if err != nil { - return 0, err - } - defer f.Close() - - // Read 4KB chunk. - buf := make([]byte, 4096) - if _, err := io.ReadFull(f, buf); err != nil { - return 0, err - } - - // Read page size from metadata. - m := (*meta)(unsafe.Pointer(&buf[PageHeaderSize])) - return int(m.pageSize), nil -} - -// atois parses a slice of strings into integers. -func atois(strs []string) ([]int, error) { - var a []int - for _, str := range strs { - i, err := strconv.Atoi(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type bucket struct { - root pgid - sequence uint64 -} - -// DO NOT EDIT. Copied from the "bolt" package. -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. 
Copied from the "bolt" package. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] -} - -// CompactCommand represents the "compact" command execution. -type CompactCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - - SrcPath string - DstPath string - TxMaxSize int64 -} - -// newCompactCommand returns a CompactCommand. -func newCompactCommand(m *Main) *CompactCommand { - return &CompactCommand{ - Stdin: m.Stdin, - Stdout: m.Stdout, - Stderr: m.Stderr, - } -} - -// Run executes the command. -func (cmd *CompactCommand) Run(args ...string) (err error) { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.SetOutput(ioutil.Discard) - fs.StringVar(&cmd.DstPath, "o", "", "") - fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") - if err := fs.Parse(args); err == flag.ErrHelp { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } else if err != nil { - return err - } else if cmd.DstPath == "" { - return fmt.Errorf("output file required") - } - - // Require database paths. - cmd.SrcPath = fs.Arg(0) - if cmd.SrcPath == "" { - return ErrPathRequired - } - - // Ensure source file exists. - fi, err := os.Stat(cmd.SrcPath) - if os.IsNotExist(err) { - return ErrFileNotFound - } else if err != nil { - return err - } - initialSize := fi.Size() - - // Open source database. - src, err := bolt.Open(cmd.SrcPath, 0444, nil) - if err != nil { - return err - } - defer src.Close() - - // Open destination database. - dst, err := bolt.Open(cmd.DstPath, fi.Mode(), nil) - if err != nil { - return err - } - defer dst.Close() - - // Run compaction. - if err := cmd.compact(dst, src); err != nil { - return err - } - - // Report stats on new size. - fi, err = os.Stat(cmd.DstPath) - if err != nil { - return err - } else if fi.Size() == 0 { - return fmt.Errorf("zero db size") - } - fmt.Fprintf(cmd.Stdout, "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) - - return nil -} - -func (cmd *CompactCommand) compact(dst, src *bolt.DB) error { - // commit regularly, or we'll run out of memory for large datasets if using one transaction. - var size int64 - tx, err := dst.Begin(true) - if err != nil { - return err - } - defer tx.Rollback() - - if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error { - // On each key/value, check if we have exceeded tx size. - sz := int64(len(k) + len(v)) - if size+sz > cmd.TxMaxSize && cmd.TxMaxSize != 0 { - // Commit previous transaction. - if err := tx.Commit(); err != nil { - return err - } - - // Start new transaction. - tx, err = dst.Begin(true) - if err != nil { - return err - } - size = 0 - } - size += sz - - // Create bucket on the root transaction if this is the first level. - nk := len(keys) - if nk == 0 { - bkt, err := tx.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Create buckets on subsequent levels, if necessary. - b := tx.Bucket(keys[0]) - if nk > 1 { - for _, k := range keys[1:] { - b = b.Bucket(k) - } - } - - // If there is no value then this is a bucket call. 
- if v == nil { - bkt, err := b.CreateBucket(k) - if err != nil { - return err - } - if err := bkt.SetSequence(seq); err != nil { - return err - } - return nil - } - - // Otherwise treat it as a key/value pair. - return b.Put(k, v) - }); err != nil { - return err - } - - return tx.Commit() -} - -// walkFunc is the type of the function called for keys (buckets and "normal" -// values) discovered by Walk. keys is the list of keys to descend to the bucket -// owning the discovered key/value pair k/v. -type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error - -// walk walks recursively the bolt database db, calling walkFn for each key it finds. -func (cmd *CompactCommand) walk(db *bolt.DB, walkFn walkFunc) error { - return db.View(func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return cmd.walkBucket(b, nil, name, nil, b.Sequence(), walkFn) - }) - }) -} - -func (cmd *CompactCommand) walkBucket(b *bolt.Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error { - // Execute callback. - if err := fn(keypath, k, v, seq); err != nil { - return err - } - - // If this is not a bucket then stop. - if v != nil { - return nil - } - - // Iterate over each child key/value. - keypath = append(keypath, k) - return b.ForEach(func(k, v []byte) error { - if v == nil { - bkt := b.Bucket(k) - return cmd.walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn) - } - return cmd.walkBucket(b, keypath, k, v, b.Sequence(), fn) - }) -} - -// Usage returns the help message. -func (cmd *CompactCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt compact [options] -o DST SRC - -Compact opens a database at SRC path and walks it recursively, copying keys -as they are found from all buckets, to a newly created database at DST path. - -The original database is left untouched. - -Additional options include: - - -tx-max-size NUM - Specifies the maximum size of individual transactions. - Defaults to 64KB. -`, "\n") -} diff --git a/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go b/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go deleted file mode 100644 index 0a11ff3395..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/cmd/bolt/main_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package main_test - -import ( - "bytes" - crypto "crypto/rand" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "strconv" - "testing" - - "github.com/boltdb/bolt" - "github.com/boltdb/bolt/cmd/bolt" -) - -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := MustOpen(0666, nil) - db.DB.Close() - defer db.Close() - - // Run the info command. - m := NewMain() - if err := m.Run("info", db.Path); err != nil { - t.Fatal(err) - } -} - -// Ensure the "stats" command executes correctly with an empty database. -func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - db.DB.Close() - - // Generate expected result. 
- exp := "Aggregate statistics for 0 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 0\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 0\n" + - "\tNumber of levels in B+tree: 0\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 0\n" + - "\tBytes actually used for leaf data: 0 (0%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 0\n" + - "\tTotal number on inlined buckets: 0 (0%)\n" + - "\tBytes used for inlined buckets: 0 (0%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Ensure the "stats" command can execute correctly. -func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := MustOpen(0666, nil) - defer db.Close() - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. - b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.DB.Close() - - // Generate expected result. - exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - -// MustOpen creates a Bolt database in a temporary location. 
-func MustOpen(mode os.FileMode, options *bolt.Options) *DB { - // Create temporary path. - f, _ := ioutil.TempFile("", "bolt-") - f.Close() - os.Remove(f.Name()) - - db, err := bolt.Open(f.Name(), mode, options) - if err != nil { - panic(err.Error()) - } - return &DB{DB: db, Path: f.Name()} -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB - Path string -} - -// Close closes and removes the database. -func (db *DB) Close() error { - defer os.Remove(db.Path) - return db.DB.Close() -} - -func TestCompactCommand_Run(t *testing.T) { - var s int64 - if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil { - t.Fatal(err) - } - rand.Seed(s) - - dstdb := MustOpen(0666, nil) - dstdb.Close() - - // fill the db - db := MustOpen(0666, nil) - if err := db.Update(func(tx *bolt.Tx) error { - n := 2 + rand.Intn(5) - for i := 0; i < n; i++ { - k := []byte(fmt.Sprintf("b%d", i)) - b, err := tx.CreateBucketIfNotExists(k) - if err != nil { - return err - } - if err := b.SetSequence(uint64(i)); err != nil { - return err - } - if err := fillBucket(b, append(k, '.')); err != nil { - return err - } - } - return nil - }); err != nil { - db.Close() - t.Fatal(err) - } - - // make the db grow by adding large values, and delete them. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) - if err != nil { - return err - } - n := 5 + rand.Intn(5) - for i := 0; i < n; i++ { - v := make([]byte, 1000*1000*(1+rand.Intn(5))) - _, err := crypto.Read(v) - if err != nil { - return err - } - if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil { - return err - } - } - return nil - }); err != nil { - db.Close() - t.Fatal(err) - } - if err := db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("large_vals")).Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - } - return tx.DeleteBucket([]byte("large_vals")) - }); err != nil { - db.Close() - t.Fatal(err) - } - db.DB.Close() - defer db.Close() - defer dstdb.Close() - - dbChk, err := chkdb(db.Path) - if err != nil { - t.Fatal(err) - } - - m := NewMain() - if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil { - t.Fatal(err) - } - - dbChkAfterCompact, err := chkdb(db.Path) - if err != nil { - t.Fatal(err) - } - - dstdbChk, err := chkdb(dstdb.Path) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(dbChk, dbChkAfterCompact) { - t.Error("the original db has been touched") - } - if !bytes.Equal(dbChk, dstdbChk) { - t.Error("the compacted db data isn't the same than the original db") - } -} - -func fillBucket(b *bolt.Bucket, prefix []byte) error { - n := 10 + rand.Intn(50) - for i := 0; i < n; i++ { - v := make([]byte, 10*(1+rand.Intn(4))) - _, err := crypto.Read(v) - if err != nil { - return err - } - k := append(prefix, []byte(fmt.Sprintf("k%d", i))...) - if err := b.Put(k, v); err != nil { - return err - } - } - // limit depth of subbuckets - s := 2 + rand.Intn(4) - if len(prefix) > (2*s + 1) { - return nil - } - n = 1 + rand.Intn(3) - for i := 0; i < n; i++ { - k := append(prefix, []byte(fmt.Sprintf("b%d", i))...) 
- sb, err := b.CreateBucket(k) - if err != nil { - return err - } - if err := fillBucket(sb, append(k, '.')); err != nil { - return err - } - } - return nil -} - -func chkdb(path string) ([]byte, error) { - db, err := bolt.Open(path, 0666, nil) - if err != nil { - return nil, err - } - defer db.Close() - var buf bytes.Buffer - err = db.View(func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return walkBucket(b, name, nil, &buf) - }) - }) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { - return err - } - - // not a bucket, exit. - if v != nil { - return nil - } - return parent.ForEach(func(k, v []byte) error { - if v == nil { - return walkBucket(parent.Bucket(k), k, nil, w) - } - return walkBucket(parent, k, v, w) - }) -} diff --git a/installer/vendor/github.com/boltdb/bolt/cursor_test.go b/installer/vendor/github.com/boltdb/bolt/cursor_test.go deleted file mode 100644 index 562d60f9af..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/cursor_test.go +++ /dev/null @@ -1,817 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "fmt" - "log" - "os" - "reflect" - "sort" - "testing" - "testing/quick" - - "github.com/boltdb/bolt" -) - -// Ensure that a cursor can return a reference to the bucket that created it. -func TestCursor_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) { - t.Fatal("cursor bucket mismatch") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can seek to the appropriate keys. -func TestCursor_Seek(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("0001")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte("0002")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("0003")); err != nil { - t.Fatal(err) - } - - if _, err := b.CreateBucket([]byte("bkt")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - - // Exact match should go to the key. - if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - - // Inexact match should go to the next key. - if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0003")) { - t.Fatalf("unexpected value: %v", v) - } - - // Low key should go to the first key. - if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte("0002")) { - t.Fatalf("unexpected value: %v", v) - } - - // High key should return no key. 
- if k, v := c.Seek([]byte("zzz")); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - // Buckets should return their key but no value. - if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestCursor_Delete(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - const count = 1000 - - // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for i := 0; i < count; i += 1 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(i)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } - } - if _, err := b.CreateBucket([]byte("sub")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - bound := make([]byte, 8) - binary.BigEndian.PutUint64(bound, uint64(count/2)) - for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() { - if err := c.Delete(); err != nil { - t.Fatal(err) - } - } - - c.Seek([]byte("sub")) - if err := c.Delete(); err != bolt.ErrIncompatibleValue { - t.Fatalf("unexpected error: %s", err) - } - - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - stats := tx.Bucket([]byte("widgets")).Stats() - if stats.KeyN != count/2+1 { - t.Fatalf("unexpected KeyN: %d", stats.KeyN) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can seek to the appropriate keys when there are a -// large number of keys. This test also checks that seek will always move -// forward to the next key. -// -// Related: https://github.com/boltdb/bolt/pull/187 -func TestCursor_Seek_Large(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var count = 10000 - - // Insert every other key between 0 and $count. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < count; i += 100 { - for j := i; j < i+100; j += 2 { - k := make([]byte, 8) - binary.BigEndian.PutUint64(k, uint64(j)) - if err := b.Put(k, make([]byte, 100)); err != nil { - t.Fatal(err) - } - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - for i := 0; i < count; i++ { - seek := make([]byte, 8) - binary.BigEndian.PutUint64(seek, uint64(i)) - - k, _ := c.Seek(seek) - - // The last seek is beyond the end of the the range so - // it should return nil. - if i == count-1 { - if k != nil { - t.Fatal("expected nil key") - } - continue - } - - // Otherwise we should seek to the exact key or the next key. - num := binary.BigEndian.Uint64(k) - if i%2 == 0 { - if num != uint64(i) { - t.Fatalf("unexpected num: %d", num) - } - } else { - if num != uint64(i+1) { - t.Fatalf("unexpected num: %d", num) - } - } - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a cursor can iterate over an empty bucket without error. 
-func TestCursor_EmptyBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.First() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can reverse iterate over an empty bucket without error. -func TestCursor_EmptyBucketReverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("widgets")).Cursor() - k, v := c.Last() - if k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can iterate over a single root with a couple elements. -func TestCursor_Iterate_Leaf(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - defer func() { _ = tx.Rollback() }() - - c := tx.Bucket([]byte("widgets")).Cursor() - - k, v := c.First() - if !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } - - k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - k, v = c.Next() - if k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements. 
-func TestCursor_LeafRootReverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{0}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{1}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - - if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{0}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, []byte{1}) { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if k, v := c.Prev(); k != nil { - t.Fatalf("expected nil key: %v", k) - } else if v != nil { - t.Fatalf("expected nil value: %v", v) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can restart from the beginning. -func TestCursor_Restart(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("bar"), []byte{}); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte{}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } - - if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) { - t.Fatalf("unexpected key: %v", k) - } - if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) { - t.Fatalf("unexpected key: %v", k) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a cursor can skip over empty pages that have been deleted. -func TestCursor_First_EmptyPages(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create 1000 keys in the "widgets" bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - for i := 0; i < 1000; i++ { - if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil { - t.Fatal(err) - } - } - - return nil - }); err != nil { - t.Fatal(err) - } - - // Delete half the keys and then try to iterate. 
- if err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < 600; i++ { - if err := b.Delete(u64tob(uint64(i))); err != nil { - t.Fatal(err) - } - } - - c := b.Cursor() - var n int - for k, _ := c.First(); k != nil; k, _ = c.Next() { - n++ - } - if n != 400 { - t.Fatalf("unexpected key count: %d", n) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx can iterate over all elements in a bucket. -func TestCursor_QuickCheck(t *testing.T) { - f := func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Sort test data. - sort.Sort(items) - - // Iterate over all items and check consistency. - var index = 0 - tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } - - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } - index++ - } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a transaction can iterate over all elements in a bucket in reverse. -func TestCursor_QuickCheck_Reverse(t *testing.T) { - f := func(items testdata) bool { - db := MustOpenDB() - defer db.MustClose() - - // Bulk insert all values. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - for _, item := range items { - if err := b.Put(item.Key, item.Value); err != nil { - t.Fatal(err) - } - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Sort test data. - sort.Sort(revtestdata(items)) - - // Iterate over all items and check consistency. - var index = 0 - tx, err = db.Begin(false) - if err != nil { - t.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() { - if !bytes.Equal(k, items[index].Key) { - t.Fatalf("unexpected key: %v", k) - } else if !bytes.Equal(v, items[index].Value) { - t.Fatalf("unexpected value: %v", v) - } - index++ - } - if len(items) != index { - t.Fatalf("unexpected item count: %v, expected %v", len(items), index) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - - return true - } - if err := quick.Check(f, qconfig()); err != nil { - t.Error(err) - } -} - -// Ensure that a Tx cursor can iterate over subbuckets. 
-func TestCursor_QuickCheck_BucketsOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) { - t.Fatalf("unexpected names: %+v", names) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx cursor can reverse iterate over subbuckets. -func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("bar")); err != nil { - t.Fatal(err) - } - if _, err := b.CreateBucket([]byte("baz")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - var names []string - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.Last(); k != nil; k, v = c.Prev() { - names = append(names, string(k)) - if v != nil { - t.Fatalf("unexpected value: %v", v) - } - } - if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) { - t.Fatalf("unexpected names: %+v", names) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func ExampleCursor() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. - b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - // Insert data into a bucket. - if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - log.Fatal(err) - } - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in sorted key order. This starts from the - // first key/value pair and updates the k/v variables to the - // next key/value on each iteration. - // - // The loop finishes at the end of the cursor when a nil key is returned. - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }); err != nil { - log.Fatal(err) - } - - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A cat is lame. - // A dog is fun. - // A liger is awesome. -} - -func ExampleCursor_reverse() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Start a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - // Create a new bucket. 
- b, err := tx.CreateBucket([]byte("animals")) - if err != nil { - return err - } - - // Insert data into a bucket. - if err := b.Put([]byte("dog"), []byte("fun")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("cat"), []byte("lame")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("liger"), []byte("awesome")); err != nil { - log.Fatal(err) - } - - // Create a cursor for iteration. - c := b.Cursor() - - // Iterate over items in reverse sorted key order. This starts - // from the last key/value pair and updates the k/v variables to - // the previous key/value on each iteration. - // - // The loop finishes at the beginning of the cursor when a nil key - // is returned. - for k, v := c.Last(); k != nil; k, v = c.Prev() { - fmt.Printf("A %s is %s.\n", k, v) - } - - return nil - }); err != nil { - log.Fatal(err) - } - - // Close the database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // A liger is awesome. - // A dog is fun. - // A cat is lame. -} diff --git a/installer/vendor/github.com/boltdb/bolt/db_test.go b/installer/vendor/github.com/boltdb/bolt/db_test.go deleted file mode 100644 index 3034d4f476..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/db_test.go +++ /dev/null @@ -1,1545 +0,0 @@ -package bolt_test - -import ( - "bytes" - "encoding/binary" - "errors" - "flag" - "fmt" - "hash/fnv" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "sort" - "strings" - "sync" - "testing" - "time" - "unsafe" - - "github.com/boltdb/bolt" -) - -var statsFlag = flag.Bool("stats", false, "show performance stats") - -// version is the data file format version. -const version = 2 - -// magic is the marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// pageSize is the size of one page in the data file. -const pageSize = 4096 - -// pageHeaderSize is the size of a page header. -const pageHeaderSize = 16 - -// meta represents a simplified version of a database meta page for testing. -type meta struct { - magic uint32 - version uint32 - _ uint32 - _ uint32 - _ [16]byte - _ uint64 - pgid uint64 - _ uint64 - checksum uint64 -} - -// Ensure that a database can be opened without error. -func TestOpen(t *testing.T) { - path := tempfile() - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } else if db == nil { - t.Fatal("expected db") - } - - if s := db.Path(); s != path { - t.Fatalf("unexpected path: %s", s) - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening a database with a blank path returns an error. -func TestOpen_ErrPathRequired(t *testing.T) { - _, err := bolt.Open("", 0666, nil) - if err == nil { - t.Fatalf("expected error") - } -} - -// Ensure that opening a database with a bad path returns an error. -func TestOpen_ErrNotExists(t *testing.T) { - _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) - if err == nil { - t.Fatal("expected error") - } -} - -// Ensure that opening a file that is not a Bolt database returns ErrInvalid. 
-func TestOpen_ErrInvalid(t *testing.T) { - path := tempfile() - - f, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - defer os.Remove(path) - - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening a file with two invalid versions returns ErrVersionMismatch. -func TestOpen_ErrVersionMismatch(t *testing.T) { - if pageSize != os.Getpagesize() { - t.Skip("page size mismatch") - } - - // Create empty database. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - // Close database. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - - // Read data file. - buf, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - // Rewrite meta pages. - meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) - meta0.version++ - meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) - meta1.version++ - if err := ioutil.WriteFile(path, buf, 0666); err != nil { - t.Fatal(err) - } - - // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening a file with two invalid checksums returns ErrChecksum. -func TestOpen_ErrChecksum(t *testing.T) { - if pageSize != os.Getpagesize() { - t.Skip("page size mismatch") - } - - // Create empty database. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - // Close database. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - - // Read data file. - buf, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - - // Rewrite meta pages. - meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize])) - meta0.pgid++ - meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize])) - meta1.pgid++ - if err := ioutil.WriteFile(path, buf, 0666); err != nil { - t.Fatal(err) - } - - // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that opening a database does not increase its size. -// https://github.com/boltdb/bolt/issues/291 -func TestOpen_Size(t *testing.T) { - // Open a data file. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - pagesize := db.Info().PageSize - - // Insert until we get above the minimum 4MB size. - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for i := 0; i < 10000; i++ { - if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil { - t.Fatal(err) - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Close database and grab the size. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db0.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db0.Close(); err != nil { - t.Fatal(err) - } - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. 
- // db size might increase by a few page sizes due to the new small update. - if sz < newSz-5*int64(pagesize) { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that opening a database beyond the max step size does not increase its size. -// https://github.com/boltdb/bolt/issues/303 -func TestOpen_Size_Large(t *testing.T) { - if testing.Short() { - t.Skip("short mode") - } - - // Open a data file. - db := MustOpenDB() - path := db.Path() - defer db.MustClose() - - pagesize := db.Info().PageSize - - // Insert until we get above the minimum 4MB size. - var index uint64 - for i := 0; i < 10000; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists([]byte("data")) - for j := 0; j < 1000; j++ { - if err := b.Put(u64tob(index), make([]byte, 50)); err != nil { - t.Fatal(err) - } - index++ - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - // Close database and grab the size. - if err := db.DB.Close(); err != nil { - t.Fatal(err) - } - sz := fileSize(path) - if sz == 0 { - t.Fatalf("unexpected new file size: %d", sz) - } else if sz < (1 << 30) { - t.Fatalf("expected larger initial size: %d", sz) - } - - // Reopen database, update, and check size again. - db0, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db0.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}) - }); err != nil { - t.Fatal(err) - } - if err := db0.Close(); err != nil { - t.Fatal(err) - } - - newSz := fileSize(path) - if newSz == 0 { - t.Fatalf("unexpected new file size: %d", newSz) - } - - // Compare the original size with the new size. - // db size might increase by a few page sizes due to the new small update. - if sz < newSz-5*int64(pagesize) { - t.Fatalf("unexpected file growth: %d => %d", sz, newSz) - } -} - -// Ensure that a re-opened database is consistent. -func TestOpen_Check(t *testing.T) { - path := tempfile() - - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - db, err = bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { return <-tx.Check() }); err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that write errors to the meta file handler during initialization are returned. -func TestOpen_MetaInitWriteError(t *testing.T) { - t.Skip("pending") -} - -// Ensure that a database that is too small returns an error. -func TestOpen_FileTooSmall(t *testing.T) { - path := tempfile() - - db, err := bolt.Open(path, 0666, nil) - if err != nil { - t.Fatal(err) - } - if err := db.Close(); err != nil { - t.Fatal(err) - } - - // corrupt the database - if err := os.Truncate(path, int64(os.Getpagesize())); err != nil { - t.Fatal(err) - } - - db, err = bolt.Open(path, 0666, nil) - if err == nil || err.Error() != "file size too small" { - t.Fatalf("unexpected error: %s", err) - } -} - -// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough -// to hold data from concurrent write transaction resolves the issue that -// read transaction blocks the write transaction and causes deadlock. -// This is a very hacky test since the mmap size is not exposed. 
-func TestDB_Open_InitialMmapSize(t *testing.T) { - path := tempfile() - defer os.Remove(path) - - initMmapSize := 1 << 31 // 2GB - testWriteSize := 1 << 27 // 134MB - - db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) - if err != nil { - t.Fatal(err) - } - - // create a long-running read transaction - // that never gets closed while writing - rtx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - - // create a write transaction - wtx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - b, err := wtx.CreateBucket([]byte("test")) - if err != nil { - t.Fatal(err) - } - - // and commit a large write - err = b.Put([]byte("foo"), make([]byte, testWriteSize)) - if err != nil { - t.Fatal(err) - } - - done := make(chan struct{}) - - go func() { - if err := wtx.Commit(); err != nil { - t.Fatal(err) - } - done <- struct{}{} - }() - - select { - case <-time.After(5 * time.Second): - t.Errorf("unexpected that the reader blocks writer") - case <-done: - } - - if err := rtx.Rollback(); err != nil { - t.Fatal(err) - } -} - -// Ensure that a database cannot open a transaction when it's not open. -func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { - var db bolt.DB - if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that a read-write transaction can be retrieved. -func TestDB_BeginRW(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } else if tx == nil { - t.Fatal("expected tx") - } - - if tx.DB() != db.DB { - t.Fatal("unexpected tx database") - } else if !tx.Writable() { - t.Fatal("expected writable tx") - } - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } -} - -// Ensure that opening a transaction while the DB is closed returns an error. -func TestDB_BeginRW_Closed(t *testing.T) { - var db bolt.DB - if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } -func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } - -// Ensure that a database cannot close while transactions are open. -func testDB_Close_PendingTx(t *testing.T, writable bool) { - db := MustOpenDB() - defer db.MustClose() - - // Start transaction. - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - // Open update in separate goroutine. - done := make(chan struct{}) - go func() { - if err := db.Close(); err != nil { - t.Fatal(err) - } - close(done) - }() - - // Ensure database hasn't closed. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - t.Fatal("database closed too early") - default: - } - - // Commit transaction. - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - // Ensure database closed now. - time.Sleep(100 * time.Millisecond) - select { - case <-done: - default: - t.Fatal("database did not close") - } -} - -// Ensure a database can provide a transactional block. 
-func TestDB_Update(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - if err := b.Delete([]byte("foo")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("expected nil value, got: %v", v) - } - if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a closed database returns an error while running a transaction block -func TestDB_Update_Closed(t *testing.T) { - var db bolt.DB - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != bolt.ErrDatabaseNotOpen { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_Update_ManualCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_Update_ManualRollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.Update(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to commit a managed transaction. -func TestDB_View_ManualCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.View(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a panic occurs while trying to rollback a managed transaction. -func TestDB_View_ManualRollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var panicked bool - if err := db.View(func(tx *bolt.Tx) error { - func() { - defer func() { - if r := recover(); r != nil { - panicked = true - } - }() - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - }() - return nil - }); err != nil { - t.Fatal(err) - } else if !panicked { - t.Fatal("expected panic") - } -} - -// Ensure a write transaction that panics does not hold open locks. -func TestDB_Update_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Panic during update but recover. 
- func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: update", r) - } - }() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - panic("omg") - }); err != nil { - t.Fatal(err) - } - }() - - // Verify we can update again. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Verify that our change persisted. - if err := db.Update(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure a database can return an error through a read-only transactional block. -func TestDB_View_Error(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.View(func(tx *bolt.Tx) error { - return errors.New("xxx") - }); err == nil || err.Error() != "xxx" { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure a read transaction that panics does not hold open locks. -func TestDB_View_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Panic during view transaction but recover. - func() { - defer func() { - if r := recover(); r != nil { - t.Log("recover: view", r) - } - }() - - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - panic("omg") - }); err != nil { - t.Fatal(err) - } - }() - - // Verify that we can still use read transactions. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that DB stats can be returned. -func TestDB_Stats(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - stats := db.Stats() - if stats.TxStats.PageCount != 2 { - t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount) - } else if stats.FreePageN != 0 { - t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN) - } else if stats.PendingPageN != 2 { - t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN) - } -} - -// Ensure that database pages are in expected order and type. 
-func TestDB_Consistency(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - for i := 0; i < 10; i++ { - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - } - - if err := db.Update(func(tx *bolt.Tx) error { - if p, _ := tx.Page(0); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(1); p == nil { - t.Fatal("expected page") - } else if p.Type != "meta" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(2); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(3); p == nil { - t.Fatal("expected page") - } else if p.Type != "free" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(4); p == nil { - t.Fatal("expected page") - } else if p.Type != "leaf" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(5); p == nil { - t.Fatal("expected page") - } else if p.Type != "freelist" { - t.Fatalf("unexpected page type: %s", p.Type) - } - - if p, _ := tx.Page(6); p != nil { - t.Fatal("unexpected page") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that DB stats can be subtracted from one another. -func TestDBStats_Sub(t *testing.T) { - var a, b bolt.Stats - a.TxStats.PageCount = 3 - a.FreePageN = 4 - b.TxStats.PageCount = 10 - b.FreePageN = 14 - diff := b.Sub(&a) - if diff.TxStats.PageCount != 7 { - t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount) - } - - // free page stats are copied from the receiver and not subtracted - if diff.FreePageN != 14 { - t.Fatalf("unexpected FreePageN: %d", diff.FreePageN) - } -} - -// Ensure two functions can perform updates in a single batch. -func TestDB_Batch(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Iterate over multiple updates in separate goroutines. - n := 2 - ch := make(chan error) - for i := 0; i < n; i++ { - go func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - }(i) - } - - // Check all responses to make sure there's no error. - for i := 0; i < n; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 0; i < n; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestDB_Batch_Panic(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var sentinel int - var bork = &sentinel - var problem interface{} - var err error - - // Execute a function inside a batch that panics. - func() { - defer func() { - if p := recover(); p != nil { - problem = p - } - }() - err = db.Batch(func(tx *bolt.Tx) error { - panic(bork) - }) - }() - - // Verify there is no error. 
- if g, e := err, error(nil); g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } - // Verify the panic was captured. - if g, e := problem, bork; g != e { - t.Fatalf("wrong error: %v != %v", g, e) - } -} - -func TestDB_BatchFull(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - const size = 3 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = size - // high enough to never trigger here - db.MaxBatchDelay = 1 * time.Hour - - go put(1) - go put(2) - - // Give the batch a chance to exhibit bugs. - time.Sleep(10 * time.Millisecond) - - // not triggered yet - select { - case <-ch: - t.Fatalf("batch triggered too early") - default: - } - - go put(3) - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func TestDB_BatchTime(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - t.Fatal(err) - } - - const size = 1 - // buffered so we never leak goroutines - ch := make(chan error, size) - put := func(i int) { - ch <- db.Batch(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{}) - }) - } - - db.MaxBatchSize = 1000 - db.MaxBatchDelay = 0 - - go put(1) - - // Batch must trigger by time alone. - - // Check all responses to make sure there's no error. - for i := 0; i < size; i++ { - if err := <-ch; err != nil { - t.Fatal(err) - } - } - - // Ensure data is correct. - if err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("widgets")) - for i := 1; i <= size; i++ { - if v := b.Get(u64tob(uint64(i))); v == nil { - t.Errorf("key not found: %d", i) - } - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -func ExampleDB_Update() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Execute several commands within a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Read the value back from a separate read-only transaction. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value of 'foo' is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value of 'foo' is: bar -} - -func ExampleDB_View() { - // Open the database. 
- db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Insert data into a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("people")) - if err != nil { - return err - } - if err := b.Put([]byte("john"), []byte("doe")); err != nil { - return err - } - if err := b.Put([]byte("susy"), []byte("que")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Access data from within a read-only transactional block. - if err := db.View(func(tx *bolt.Tx) error { - v := tx.Bucket([]byte("people")).Get([]byte("john")) - fmt.Printf("John's last name is %s.\n", v) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release the file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // John's last name is doe. -} - -func ExampleDB_Begin_ReadOnly() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket using a read-write transaction. - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - log.Fatal(err) - } - - // Create several keys in a transaction. - tx, err := db.Begin(true) - if err != nil { - log.Fatal(err) - } - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("john"), []byte("blue")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("abby"), []byte("red")); err != nil { - log.Fatal(err) - } - if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil { - log.Fatal(err) - } - if err := tx.Commit(); err != nil { - log.Fatal(err) - } - - // Iterate over the values in sorted key order. 
- tx, err = db.Begin(false) - if err != nil { - log.Fatal(err) - } - c := tx.Bucket([]byte("widgets")).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("%s likes %s\n", k, v) - } - - if err := tx.Rollback(); err != nil { - log.Fatal(err) - } - - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // abby likes red - // john likes blue - // zephyr likes purple -} - -func BenchmarkDBBatchAutomatic(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Batch(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchSingle(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for round := 0; round < 1000; round++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - h := fnv.New32a() - buf := make([]byte, 4) - binary.LittleEndian.PutUint32(buf, id) - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - insert := func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("bench")) - return b.Put(k, []byte("filler")) - } - if err := db.Update(insert); err != nil { - b.Error(err) - return - } - }(uint32(round)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func BenchmarkDBBatchManual10x100(b *testing.B) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("bench")) - return err - }); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - start := make(chan struct{}) - var wg sync.WaitGroup - - for major := 0; major < 10; major++ { - wg.Add(1) - go func(id uint32) { - defer wg.Done() - <-start - - insert100 := func(tx *bolt.Tx) error { - h := fnv.New32a() - buf := make([]byte, 4) - for minor := uint32(0); minor < 100; minor++ { - binary.LittleEndian.PutUint32(buf, uint32(id*100+minor)) - h.Reset() - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - b := tx.Bucket([]byte("bench")) - if err := b.Put(k, []byte("filler")); err != nil { - return err - } - } - return nil - } - if err := db.Update(insert100); err != nil { - b.Fatal(err) - } - }(uint32(major)) - } - close(start) - wg.Wait() - } - - b.StopTimer() - validateBatchBench(b, db) -} - -func validateBatchBench(b *testing.B, db *DB) { - var rollback = errors.New("sentinel error to cause rollback") - validate := func(tx *bolt.Tx) error { - bucket := tx.Bucket([]byte("bench")) - h := fnv.New32a() - buf := make([]byte, 4) - for id := uint32(0); id < 1000; id++ { - binary.LittleEndian.PutUint32(buf, id) - h.Reset() - _, _ = h.Write(buf[:]) - k := h.Sum(nil) - v := bucket.Get(k) - if v == nil { - 
b.Errorf("not found id=%d key=%x", id, k) - continue - } - if g, e := v, []byte("filler"); !bytes.Equal(g, e) { - b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e) - } - if err := bucket.Delete(k); err != nil { - return err - } - } - // should be empty now - c := bucket.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - b.Errorf("unexpected key: %x = %q", k, v) - } - return rollback - } - if err := db.Update(validate); err != nil && err != rollback { - b.Error(err) - } -} - -// DB is a test wrapper for bolt.DB. -type DB struct { - *bolt.DB -} - -// MustOpenDB returns a new, open DB at a temporary location. -func MustOpenDB() *DB { - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - panic(err) - } - return &DB{db} -} - -// Close closes the database and deletes the underlying file. -func (db *DB) Close() error { - // Log statistics. - if *statsFlag { - db.PrintStats() - } - - // Check database consistency after every test. - db.MustCheck() - - // Close database and remove file. - defer os.Remove(db.Path()) - return db.DB.Close() -} - -// MustClose closes the database and deletes the underlying file. Panic on error. -func (db *DB) MustClose() { - if err := db.Close(); err != nil { - panic(err) - } -} - -// PrintStats prints the database stats -func (db *DB) PrintStats() { - var stats = db.Stats() - fmt.Printf("[db] %-20s %-20s %-20s\n", - fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc), - fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount), - fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref), - ) - fmt.Printf(" %-20s %-20s %-20s\n", - fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)), - fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)), - fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)), - ) -} - -// MustCheck runs a consistency check on the database and panics if any errors are found. -func (db *DB) MustCheck() { - if err := db.Update(func(tx *bolt.Tx) error { - // Collect all the errors. - var errors []error - for err := range tx.Check() { - errors = append(errors, err) - if len(errors) > 10 { - break - } - } - - // If errors occurred, copy the DB and print the errors. - if len(errors) > 0 { - var path = tempfile() - if err := tx.CopyFile(path, 0600); err != nil { - panic(err) - } - - // Print errors. - fmt.Print("\n\n") - fmt.Printf("consistency check failed (%d errors)\n", len(errors)) - for _, err := range errors { - fmt.Println(err) - } - fmt.Println("") - fmt.Println("db saved to:") - fmt.Println(path) - fmt.Print("\n\n") - os.Exit(-1) - } - - return nil - }); err != nil && err != bolt.ErrDatabaseNotOpen { - panic(err) - } -} - -// CopyTempFile copies a database to a temporary file. -func (db *DB) CopyTempFile() { - path := tempfile() - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(path, 0600) - }); err != nil { - panic(err) - } - fmt.Println("db copied to: ", path) -} - -// tempfile returns a temporary file path. -func tempfile() string { - f, err := ioutil.TempFile("", "bolt-") - if err != nil { - panic(err) - } - if err := f.Close(); err != nil { - panic(err) - } - if err := os.Remove(f.Name()); err != nil { - panic(err) - } - return f.Name() -} - -// mustContainKeys checks that a bucket contains a given set of keys. 
-func mustContainKeys(b *bolt.Bucket, m map[string]string) { - found := make(map[string]string) - if err := b.ForEach(func(k, _ []byte) error { - found[string(k)] = "" - return nil - }); err != nil { - panic(err) - } - - // Check for keys found in bucket that shouldn't be there. - var keys []string - for k, _ := range found { - if _, ok := m[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ","))) - } - - // Check for keys not found in bucket that should be there. - for k, _ := range m { - if _, ok := found[string(k)]; !ok { - keys = append(keys, k) - } - } - if len(keys) > 0 { - sort.Strings(keys) - panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ","))) - } -} - -func trunc(b []byte, length int) []byte { - if length < len(b) { - return b[:length] - } - return b -} - -func truncDuration(d time.Duration) string { - return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") -} - -func fileSize(path string) int64 { - fi, err := os.Stat(path) - if err != nil { - return 0 - } - return fi.Size() -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -// u64tob converts a uint64 into an 8-byte slice. -func u64tob(v uint64) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, v) - return b -} - -// btou64 converts an 8-byte slice into an uint64. -func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } diff --git a/installer/vendor/github.com/boltdb/bolt/freelist_test.go b/installer/vendor/github.com/boltdb/bolt/freelist_test.go deleted file mode 100644 index 4e9b3a8dbf..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/freelist_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package bolt - -import ( - "math/rand" - "reflect" - "sort" - "testing" - "unsafe" -) - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100]) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100]) - } -} - -// Ensure that a transaction's free pages can be released. -func TestFreelist_release(t *testing.T) { - f := newFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) - f.release(100) - f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. 
-func TestFreelist_allocate(t *testing.T) { - f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} - if id := int(f.allocate(3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } - - if id := int(f.allocate(1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can deserialize from a freelist page. -func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 - - // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) { - t.Fatalf("exp=%v; got=%v", exp, f.ids) - } -} - -// Ensure that a freelist can serialize into a freelist page. -func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} - f.pending[100] = []pgid{28, 11} - f.pending[101] = []pgid{3} - p := (*page)(unsafe.Pointer(&buf[0])) - if err := f.write(p); err != nil { - t.Fatal(err) - } - - // Read the page back out. - f2 := newFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. - if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) { - t.Fatalf("exp=%v; got=%v", exp, f2.ids) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} - f.release(1) - } -} - -func randomPgids(n int) []pgid { - rand.Seed(42) - pgids := make(pgids, n) - for i := range pgids { - pgids[i] = pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} diff --git a/installer/vendor/github.com/boltdb/bolt/node_test.go b/installer/vendor/github.com/boltdb/bolt/node_test.go deleted file mode 100644 index fa5d10f999..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/node_test.go +++ /dev/null @@ -1,156 +0,0 @@ -package bolt - -import ( - "testing" - "unsafe" -) - -// Ensure that a node can insert a key/value. 
-func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} - n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) - n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) - - if len(n.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if n.inodes[2].flags != uint32(leafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) - } -} - -// Ensure that a node can deserialize from a leaf page. -func TestNode_read_LeafPage(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 - - // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 - - // Write data for the nodes at the end. - data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) - copy(data[:], []byte("barfooz")) - copy(data[7:], []byte("helloworldbye")) - - // Deserialize page into a leaf. - n := &node{} - n.read(page) - - // Check that there are two inodes with correct data. - if !n.isLeaf { - t.Fatal("expected leaf") - } - if len(n.inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(n.inodes)) - } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can serialize into a leaf page. -func TestNode_write_LeafPage(t *testing.T) { - // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) - n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) - n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) - - // Write it to a page. - var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) - n.write(p) - - // Read the page back in. - n2 := &node{} - n2.read(p) - - // Check that the two pages are the same. - if len(n2.inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(n2.inodes)) - } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { - t.Fatalf("exp=; got=<%s,%s>", k, v) - } -} - -// Ensure that a node can split into appropriate subgroups. -func TestNode_split(t *testing.T) { - // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split between 2 & 3. - n.split(100) - - var parent = n.parent - if len(parent.children) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children)) - } - if len(parent.children[0].inodes) != 2 { - t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes)) - } - if len(parent.children[1].inodes) != 3 { - t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes)) - } -} - -// Ensure that a page with the minimum number of inodes just returns a single node. -func TestNode_split_MinKeys(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(20) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} - -// Ensure that a node that has keys that all fit on a page just returns one leaf. -func TestNode_split_SinglePage(t *testing.T) { - // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} - n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0) - n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0) - - // Split. - n.split(4096) - if n.parent != nil { - t.Fatalf("expected nil parent") - } -} diff --git a/installer/vendor/github.com/boltdb/bolt/page_test.go b/installer/vendor/github.com/boltdb/bolt/page_test.go deleted file mode 100644 index 59f4a30ed8..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. -func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. 
-func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. - sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/installer/vendor/github.com/boltdb/bolt/quick_test.go b/installer/vendor/github.com/boltdb/bolt/quick_test.go deleted file mode 100644 index 9e27792e1a..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/quick_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package bolt_test - -import ( - "bytes" - "flag" - "fmt" - "math/rand" - "os" - "reflect" - "testing/quick" - "time" -) - -// testing/quick defaults to 5 iterations and a random seed. -// You can override these settings from the command line: -// -// -quick.count The number of iterations to perform. -// -quick.seed The seed to use for randomizing. -// -quick.maxitems The maximum number of items to insert into a DB. -// -quick.maxksize The maximum size of a key. -// -quick.maxvsize The maximum size of a value. -// - -var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int - -func init() { - flag.IntVar(&qcount, "quick.count", 5, "") - flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") - flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") - flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") - flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") - flag.Parse() - fmt.Fprintln(os.Stderr, "seed:", qseed) - fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize) -} - -func qconfig() *quick.Config { - return &quick.Config{ - MaxCount: qcount, - Rand: rand.New(rand.NewSource(int64(qseed))), - } -} - -type testdata []testdataitem - -func (t testdata) Len() int { return len(t) } -func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } - -func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { - n := rand.Intn(qmaxitems-1) + 1 - items := make(testdata, n) - used := make(map[string]bool) - for i := 0; i < n; i++ { - item := &items[i] - // Ensure that keys are unique by looping until we find one that we have not already used. 
- for { - item.Key = randByteSlice(rand, 1, qmaxksize) - if !used[string(item.Key)] { - used[string(item.Key)] = true - break - } - } - item.Value = randByteSlice(rand, 0, qmaxvsize) - } - return reflect.ValueOf(items) -} - -type revtestdata []testdataitem - -func (t revtestdata) Len() int { return len(t) } -func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } - -type testdataitem struct { - Key []byte - Value []byte -} - -func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { - n := rand.Intn(maxSize-minSize) + minSize - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/installer/vendor/github.com/boltdb/bolt/simulation_test.go b/installer/vendor/github.com/boltdb/bolt/simulation_test.go deleted file mode 100644 index 3831016557..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/simulation_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package bolt_test - -import ( - "bytes" - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } -func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } -func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } -func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } -func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) } - -func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } -func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } -func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } -func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) } - -func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } -func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } -func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) } - -func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) } - -// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety. -func testSimulate(t *testing.T, threadCount, parallelism int) { - if testing.Short() { - t.Skip("skipping test in short mode.") - } - - rand.Seed(int64(qseed)) - - // A list of operations that readers and writers can perform. - var readerHandlers = []simulateHandler{simulateGetHandler} - var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} - - var versions = make(map[int]*QuickDB) - versions[1] = NewQuickDB() - - db := MustOpenDB() - defer db.MustClose() - - var mutex sync.Mutex - - // Run n threads in parallel, each with their own operation. - var wg sync.WaitGroup - var threads = make(chan bool, parallelism) - var i int - for { - threads <- true - wg.Add(1) - writable := ((rand.Int() % 100) < 20) // 20% writers - - // Choose an operation to execute. - var handler simulateHandler - if writable { - handler = writerHandlers[rand.Intn(len(writerHandlers))] - } else { - handler = readerHandlers[rand.Intn(len(readerHandlers))] - } - - // Execute a thread for the given operation. - go func(writable bool, handler simulateHandler) { - defer wg.Done() - - // Start transaction. - tx, err := db.Begin(writable) - if err != nil { - t.Fatal("tx begin: ", err) - } - - // Obtain current state of the dataset. 
- mutex.Lock() - var qdb = versions[tx.ID()] - if writable { - qdb = versions[tx.ID()-1].Copy() - } - mutex.Unlock() - - // Make sure we commit/rollback the tx at the end and update the state. - if writable { - defer func() { - mutex.Lock() - versions[tx.ID()] = qdb - mutex.Unlock() - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - }() - } else { - defer func() { _ = tx.Rollback() }() - } - - // Ignore operation if we don't have data yet. - if qdb == nil { - return - } - - // Execute handler. - handler(tx, qdb) - - // Release a thread back to the scheduling loop. - <-threads - }(writable, handler) - - i++ - if i > threadCount { - break - } - } - - // Wait until all threads are done. - wg.Wait() -} - -type simulateHandler func(tx *bolt.Tx, qdb *QuickDB) - -// Retrieves a key from the database and verifies that it is what is expected. -func simulateGetHandler(tx *bolt.Tx, qdb *QuickDB) { - // Randomly retrieve an existing exist. - keys := qdb.Rand() - if len(keys) == 0 { - return - } - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4))) - } - - // Drill into nested buckets. - for _, key := range keys[1 : len(keys)-1] { - b = b.Bucket(key) - if b == nil { - panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key)) - } - } - - // Verify key/value on the final bucket. - expected := qdb.Get(keys) - actual := b.Get(keys[len(keys)-1]) - if !bytes.Equal(actual, expected) { - fmt.Println("=== EXPECTED ===") - fmt.Println(expected) - fmt.Println("=== ACTUAL ===") - fmt.Println(actual) - fmt.Println("=== END ===") - panic("value mismatch") - } -} - -// Inserts a key into the database. -func simulatePutHandler(tx *bolt.Tx, qdb *QuickDB) { - var err error - keys, value := randKeys(), randValue() - - // Retrieve root bucket. - b := tx.Bucket(keys[0]) - if b == nil { - b, err = tx.CreateBucket(keys[0]) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - - // Create nested buckets, if necessary. - for _, key := range keys[1 : len(keys)-1] { - child := b.Bucket(key) - if child != nil { - b = child - } else { - b, err = b.CreateBucket(key) - if err != nil { - panic("create bucket: " + err.Error()) - } - } - } - - // Insert into database. - if err := b.Put(keys[len(keys)-1], value); err != nil { - panic("put: " + err.Error()) - } - - // Insert into in-memory database. - qdb.Put(keys, value) -} - -// QuickDB is an in-memory database that replicates the functionality of the -// Bolt DB type except that it is entirely in-memory. It is meant for testing -// that the Bolt database is consistent. -type QuickDB struct { - sync.RWMutex - m map[string]interface{} -} - -// NewQuickDB returns an instance of QuickDB. -func NewQuickDB() *QuickDB { - return &QuickDB{m: make(map[string]interface{})} -} - -// Get retrieves the value at a key path. -func (db *QuickDB) Get(keys [][]byte) []byte { - db.RLock() - defer db.RUnlock() - - m := db.m - for _, key := range keys[:len(keys)-1] { - value := m[string(key)] - if value == nil { - return nil - } - switch value := value.(type) { - case map[string]interface{}: - m = value - case []byte: - return nil - } - } - - // Only return if it's a simple value. - if value, ok := m[string(keys[len(keys)-1])].([]byte); ok { - return value - } - return nil -} - -// Put inserts a value into a key path. -func (db *QuickDB) Put(keys [][]byte, value []byte) { - db.Lock() - defer db.Unlock() - - // Build buckets all the way down the key path. 
- m := db.m - for _, key := range keys[:len(keys)-1] { - if _, ok := m[string(key)].([]byte); ok { - return // Keypath intersects with a simple value. Do nothing. - } - - if m[string(key)] == nil { - m[string(key)] = make(map[string]interface{}) - } - m = m[string(key)].(map[string]interface{}) - } - - // Insert value into the last key. - m[string(keys[len(keys)-1])] = value -} - -// Rand returns a random key path that points to a simple value. -func (db *QuickDB) Rand() [][]byte { - db.RLock() - defer db.RUnlock() - if len(db.m) == 0 { - return nil - } - var keys [][]byte - db.rand(db.m, &keys) - return keys -} - -func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { - i, index := 0, rand.Intn(len(m)) - for k, v := range m { - if i == index { - *keys = append(*keys, []byte(k)) - if v, ok := v.(map[string]interface{}); ok { - db.rand(v, keys) - } - return - } - i++ - } - panic("quickdb rand: out-of-range") -} - -// Copy copies the entire database. -func (db *QuickDB) Copy() *QuickDB { - db.RLock() - defer db.RUnlock() - return &QuickDB{m: db.copy(db.m)} -} - -func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { - clone := make(map[string]interface{}, len(m)) - for k, v := range m { - switch v := v.(type) { - case map[string]interface{}: - clone[k] = db.copy(v) - default: - clone[k] = v - } - } - return clone -} - -func randKey() []byte { - var min, max = 1, 1024 - n := rand.Intn(max-min) + min - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} - -func randKeys() [][]byte { - var keys [][]byte - var count = rand.Intn(2) + 2 - for i := 0; i < count; i++ { - keys = append(keys, randKey()) - } - return keys -} - -func randValue() []byte { - n := rand.Intn(8192) - b := make([]byte, n) - for i := 0; i < n; i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/installer/vendor/github.com/boltdb/bolt/tx_test.go b/installer/vendor/github.com/boltdb/bolt/tx_test.go deleted file mode 100644 index 2201e79288..0000000000 --- a/installer/vendor/github.com/boltdb/bolt/tx_test.go +++ /dev/null @@ -1,716 +0,0 @@ -package bolt_test - -import ( - "bytes" - "errors" - "fmt" - "log" - "os" - "testing" - - "github.com/boltdb/bolt" -) - -// Ensure that committing a closed transaction returns an error. -func TestTx_Commit_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("foo")); err != nil { - t.Fatal(err) - } - - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - if err := tx.Commit(); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that rolling back a closed transaction returns an error. -func TestTx_Rollback_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - - if err := tx.Rollback(); err != nil { - t.Fatal(err) - } - if err := tx.Rollback(); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that committing a read-only transaction returns an error. -func TestTx_Commit_ErrTxNotWritable(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(false) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != bolt.ErrTxNotWritable { - t.Fatal(err) - } -} - -// Ensure that a transaction can retrieve a cursor on the root bucket. 
-func TestTx_Cursor(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("woojits")); err != nil { - t.Fatal(err) - } - - c := tx.Cursor() - if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", v) - } - - if k, v := c.Next(); k != nil { - t.Fatalf("unexpected key: %v", k) - } else if v != nil { - t.Fatalf("unexpected value: %v", k) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket with a read-only transaction returns an error. -func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.View(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("foo")) - if err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that creating a bucket on a closed transaction returns an error. -func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - - if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that a Tx can retrieve a bucket. -func TestTx_Bucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a Tx retrieving a non-existent key returns nil. -func TestTx_Get_NotFound(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if b.Get([]byte("no_such_key")) != nil { - t.Fatal("expected nil value") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be created and retrieved. -func TestTx_CreateBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be created if it doesn't already exist. -func TestTx_CreateBucketIfNotExists(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - // Create bucket. 
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - - // Create bucket again. - if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil { - t.Fatal(err) - } else if b == nil { - t.Fatal("expected bucket") - } - - return nil - }); err != nil { - t.Fatal(err) - } - - // Read the bucket through a separate transaction. - if err := db.View(func(tx *bolt.Tx) error { - if tx.Bucket([]byte("widgets")) == nil { - t.Fatal("expected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure transaction returns an error if creating an unnamed bucket. -func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - - if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket cannot be created twice. -func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Create the same bucket again. - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket is created with a non-blank name. -func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that a bucket can be deleted. -func TestTx_DeleteBucket(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - // Create a bucket and add a value. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - // Delete the bucket and make sure we can't get the value. - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - if tx.Bucket([]byte("widgets")) != nil { - t.Fatal("unexpected bucket") - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.Update(func(tx *bolt.Tx) error { - // Create the bucket again and make sure there's not a phantom value. - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if v := b.Get([]byte("foo")); v != nil { - t.Fatalf("unexpected phantom value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that deleting a bucket on a closed transaction returns an error. 
-func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) - } - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { - t.Fatalf("unexpected error: %s", err) - } -} - -// Ensure that deleting a bucket with a read-only transaction returns an error. -func TestTx_DeleteBucket_ReadOnly(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.View(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that nothing happens when deleting a bucket that doesn't exist. -func TestTx_DeleteBucket_NotFound(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that no error is returned when a tx.ForEach function does not return -// an error. -func TestTx_ForEach_NoError(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return nil - }); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that an error is returned when a tx.ForEach function returns an error. -func TestTx_ForEach_WithError(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - - marker := errors.New("marker") - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return marker - }); err != marker { - t.Fatalf("unexpected error: %s", err) - } - return nil - }); err != nil { - t.Fatal(err) - } -} - -// Ensure that Tx commit handlers are called after a transaction successfully commits. -func TestTx_OnCommit(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var x int - if err := db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } else if x != 3 { - t.Fatalf("unexpected x: %d", x) - } -} - -// Ensure that Tx commit handlers are NOT called after a transaction rolls back. -func TestTx_OnCommit_Rollback(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - var x int - if err := db.Update(func(tx *bolt.Tx) error { - tx.OnCommit(func() { x += 1 }) - tx.OnCommit(func() { x += 2 }) - if _, err := tx.CreateBucket([]byte("widgets")); err != nil { - t.Fatal(err) - } - return errors.New("rollback this commit") - }); err == nil || err.Error() != "rollback this commit" { - t.Fatalf("unexpected error: %s", err) - } else if x != 0 { - t.Fatalf("unexpected x: %d", x) - } -} - -// Ensure that the database can be copied to a file path. 
-func TestTx_CopyFile(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - - path := tempfile() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(path, 0600) - }); err != nil { - t.Fatal(err) - } - - db2, err := bolt.Open(path, 0600, nil) - if err != nil { - t.Fatal(err) - } - - if err := db2.View(func(tx *bolt.Tx) error { - if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) { - t.Fatalf("unexpected value: %v", v) - } - if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) { - t.Fatalf("unexpected value: %v", v) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db2.Close(); err != nil { - t.Fatal(err) - } -} - -type failWriterError struct{} - -func (failWriterError) Error() string { - return "error injected for tests" -} - -type failWriter struct { - // fail after this many bytes - After int -} - -func (f *failWriter) Write(p []byte) (n int, err error) { - n = len(p) - if n > f.After { - n = f.After - err = failWriterError{} - } - f.After -= n - return n, err -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Meta(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.Copy(&failWriter{}) - }); err == nil || err.Error() != "meta 0 copy: error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } -} - -// Ensure that Copy handles write errors right. -func TestTx_CopyFile_Error_Normal(t *testing.T) { - db := MustOpenDB() - defer db.MustClose() - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - t.Fatal(err) - } - if err := b.Put([]byte("baz"), []byte("bat")); err != nil { - t.Fatal(err) - } - return nil - }); err != nil { - t.Fatal(err) - } - - if err := db.View(func(tx *bolt.Tx) error { - return tx.Copy(&failWriter{3 * db.Info().PageSize}) - }); err == nil || err.Error() != "error injected for tests" { - t.Fatalf("unexpected error: %v", err) - } -} - -func ExampleTx_Rollback() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket. - if err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }); err != nil { - log.Fatal(err) - } - - // Set a value for a key. - if err := db.Update(func(tx *bolt.Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")) - }); err != nil { - log.Fatal(err) - } - - // Update the key but rollback the transaction so it never saves. 
- tx, err := db.Begin(true) - if err != nil { - log.Fatal(err) - } - b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("baz")); err != nil { - log.Fatal(err) - } - if err := tx.Rollback(); err != nil { - log.Fatal(err) - } - - // Ensure that our original value is still set. - if err := db.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' is still: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value for 'foo' is still: bar -} - -func ExampleTx_CopyFile() { - // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) - if err != nil { - log.Fatal(err) - } - defer os.Remove(db.Path()) - - // Create a bucket and a key. - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("widgets")) - if err != nil { - return err - } - if err := b.Put([]byte("foo"), []byte("bar")); err != nil { - return err - } - return nil - }); err != nil { - log.Fatal(err) - } - - // Copy the database to another file. - toFile := tempfile() - if err := db.View(func(tx *bolt.Tx) error { - return tx.CopyFile(toFile, 0666) - }); err != nil { - log.Fatal(err) - } - defer os.Remove(toFile) - - // Open the cloned database. - db2, err := bolt.Open(toFile, 0666, nil) - if err != nil { - log.Fatal(err) - } - - // Ensure that the key exists in the copy. - if err := db2.View(func(tx *bolt.Tx) error { - value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) - fmt.Printf("The value for 'foo' in the clone is: %s\n", value) - return nil - }); err != nil { - log.Fatal(err) - } - - // Close database to release file lock. - if err := db.Close(); err != nil { - log.Fatal(err) - } - - if err := db2.Close(); err != nil { - log.Fatal(err) - } - - // Output: - // The value for 'foo' in the clone is: bar -} diff --git a/installer/vendor/github.com/cenkalti/backoff/LICENSE b/installer/vendor/github.com/cenkalti/backoff/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
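// Illustrative usage sketch of the github.com/cenkalti/backoff package whose
// sources are vendored in the hunks below. This is a minimal, hedged example —
// it assumes only the exported Retry and NewExponentialBackOff helpers defined
// in retry.go and exponential.go, and is not itself part of the patch.
//
//	package main
//
//	import (
//		"errors"
//		"log"
//
//		"github.com/cenkalti/backoff"
//	)
//
//	func main() {
//		attempts := 0
//		operation := func() error {
//			attempts++
//			if attempts < 3 {
//				return errors.New("transient failure") // retried with exponential backoff
//			}
//			return nil // returning nil stops the retry loop
//		}
//		if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
//			log.Fatalf("giving up after retries: %v", err)
//		}
//	}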
diff --git a/installer/vendor/github.com/cenkalti/backoff/backoff.go b/installer/vendor/github.com/cenkalti/backoff/backoff.go new file mode 100644 index 0000000000..3676ee405d --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/installer/vendor/github.com/cenkalti/backoff/context.go b/installer/vendor/github.com/cenkalti/backoff/context.go new file mode 100644 index 0000000000..5d15709254 --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/context.go @@ -0,0 +1,60 @@ +package backoff + +import ( + "time" + + "golang.org/x/net/context" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. 
+type BackOffContext interface { + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func ensureContext(b BackOff) BackOffContext { + if cb, ok := b.(BackOffContext); ok { + return cb + } + return WithContext(b, context.Background()) +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.Context().Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/installer/vendor/github.com/cenkalti/backoff/exponential.go b/installer/vendor/github.com/cenkalti/backoff/exponential.go new file mode 100644 index 0000000000..d9de15a177 --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/exponential.go @@ -0,0 +1,158 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time + random *rand.Rand +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. 
+const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + random: rand.New(rand.NewSource(time.Now().UnixNano())), + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + if b.random == nil { + b.random = rand.New(rand.NewSource(time.Now().UnixNano())) + } + return getRandomValueFromInterval(b.RandomizationFactor, b.random.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. 
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/installer/vendor/github.com/cenkalti/backoff/retry.go b/installer/vendor/github.com/cenkalti/backoff/retry.go new file mode 100644 index 0000000000..5dbd825b5c --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/retry.go @@ -0,0 +1,78 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// It is the caller's responsibility to reset b after Retry returns. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + + cb := ensureContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if permanent, ok := err.(*PermanentError); ok { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + t := time.NewTimer(next) + + select { + case <-cb.Context().Done(): + t.Stop() + return err + case <-t.C: + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) *PermanentError { + return &PermanentError{ + Err: err, + } +} diff --git a/installer/vendor/github.com/cenkalti/backoff/ticker.go b/installer/vendor/github.com/cenkalti/backoff/ticker.go new file mode 100644 index 0000000000..e742512fd3 --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/ticker.go @@ -0,0 +1,84 @@ +package backoff + +import ( + "runtime" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOffContext + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. 
+func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: ensureContext(b), + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + runtime.SetFinalizer(t, (*Ticker).Stop) + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.b.Context().Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/installer/vendor/github.com/cenkalti/backoff/tries.go b/installer/vendor/github.com/cenkalti/backoff/tries.go new file mode 100644 index 0000000000..cfeefd9b76 --- /dev/null +++ b/installer/vendor/github.com/cenkalti/backoff/tries.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/installer/vendor/github.com/coreos/go-systemd/.travis.yml b/installer/vendor/github.com/coreos/go-systemd/.travis.yml deleted file mode 100644 index 21fb7a01d6..0000000000 --- a/installer/vendor/github.com/coreos/go-systemd/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ -language: shell # We do everything inside Docker and don't want travis fiddling with steps or environment variables - -sudo: required - -services: - - docker - -env: - global: - - GOPATH=/opt - - BUILD_DIR=/opt/src/github.com/coreos/go-systemd - matrix: - - DOCKER_BASE=ubuntu:16.04 - - DOCKER_BASE=debian:stretch - -before_install: - - docker pull ${DOCKER_BASE} - - docker run --privileged -e GOPATH=${GOPATH} --cidfile=/tmp/cidfile ${DOCKER_BASE} /bin/bash -c "apt-get update && apt-get install -y build-essential git golang dbus libsystemd-dev libpam-systemd systemd-container && go get github.com/coreos/pkg/dlopen && go get github.com/godbus/dbus" - - docker commit `cat /tmp/cidfile` go-systemd/container-tests - - rm -f /tmp/cidfile - -install: - - docker run -d --cidfile=/tmp/cidfile --privileged -e GOPATH=${GOPATH} -v ${PWD}:${BUILD_DIR} go-systemd/container-tests /bin/systemd --system - -script: - - docker exec `cat /tmp/cidfile` /bin/bash -c "cd ${BUILD_DIR} && ./test" - -after_script: - - docker kill `cat /tmp/cidfile` diff --git a/installer/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md b/installer/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md deleted file mode 100644 index 0551ed53d3..0000000000 --- 
a/installer/vendor/github.com/coreos/go-systemd/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# How to Contribute - -CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via -GitHub pull requests. This document outlines some of the conventions on -development workflow, commit message formatting, contact points and other -resources to make it easier to get your contribution accepted. - -# Certificate of Origin - -By contributing to this project you agree to the Developer Certificate of -Origin (DCO). This document was created by the Linux Kernel community and is a -simple statement that you, as a contributor, have the legal right to make the -contribution. See the [DCO](DCO) file for details. - -# Email and Chat - -The project currently uses the general CoreOS email list and IRC channel: -- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev) -- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org - -Please avoid emailing maintainers found in the MAINTAINERS file directly. They -are very busy and read the mailing lists. - -## Getting Started - -- Fork the repository on GitHub -- Read the [README](README.md) for build and test instructions -- Play with the project, submit bugs, submit patches! - -## Contribution Flow - -This is a rough outline of what a contributor's workflow looks like: - -- Create a topic branch from where you want to base your work (usually master). -- Make commits of logical units. -- Make sure your commit messages are in the proper format (see below). -- Push your changes to a topic branch in your fork of the repository. -- Make sure the tests pass, and add any new tests as appropriate. -- Submit a pull request to the original repository. - -Thanks for your contributions! - -### Coding Style - -CoreOS projects written in Go follow a set of style guidelines that we've documented -[here](https://github.com/coreos/docs/tree/master/golang). Please follow them when -working on your contributions. - -### Format of the Commit Message - -We follow a rough convention for commit messages that is designed to answer two -questions: what changed and why. The subject line should feature the what and -the body of the commit should describe the why. - -``` -scripts: add the test-cluster command - -this uses tmux to setup a test cluster that you can easily kill and -start for debugging. - -Fixes #38 -``` - -The format can be described more formally as follows: - -``` -<subsystem>: <what changed> -<BLANK LINE> -<why this change was made> -<BLANK LINE>