diff --git a/go.mod b/go.mod index e065035..02853f9 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,12 @@ require ( github.com/go-yaml/yaml v2.1.0+incompatible github.com/kr/pretty v0.1.0 // indirect github.com/sethvargo/go-password v0.1.2 + github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 // indirect + github.com/tidwall/buntdb v1.1.0 + github.com/tidwall/gjson v1.3.2 // indirect + github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb // indirect + github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e // indirect + github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect diff --git a/go.sum b/go.sum index 3c7d439..679597b 100644 --- a/go.sum +++ b/go.sum @@ -9,6 +9,22 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/sethvargo/go-password v0.1.2 h1:fhBF4thiPVKEZ7R6+CX46GWJiPyCyXshbeqZ7lqEeYo= github.com/sethvargo/go-password v0.1.2/go.mod h1:qKHfdSjT26DpHQWHWWR5+X4BI45jT31dg6j4RI2TEb0= +github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 h1:QnyrPZZvPmR0AtJCxxfCtI1qN+fYpKTKJ/5opWmZ34k= +github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8= +github.com/tidwall/buntdb v1.1.0 h1:H6LzK59KiNjf1nHVPFrYj4Qnl8d8YLBsYamdL8N+Bao= +github.com/tidwall/buntdb v1.1.0/go.mod h1:Y39xhcDW10WlyYXeLgGftXVbjtM0QP+/kpz8xl9cbzE= +github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI= +github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb h1:5NSYaAdrnblKByzd7XByQEJVT8+9v0W/tIY0Oo4OwrE= +github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb/go.mod h1:lKYYLFIr9OIgdgrtgkZ9zgRxRdvPYsExnYBsEAd8W5M= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e h1:+NL1GDIUOKxVfbp2KoJQD9cTQ6dyP2co9q4yzmT9FZo= +github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao= +github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE= +github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 h1:kkXA53yGe04D0adEYJwEVQjeBppL01Exg+fnMjfUraU= golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= diff --git a/vendor/github.com/tidwall/btree/.travis.yml b/vendor/github.com/tidwall/btree/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/tidwall/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/btree/LICENSE b/vendor/github.com/tidwall/btree/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ 
b/vendor/github.com/tidwall/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/tidwall/btree/README.md b/vendor/github.com/tidwall/btree/README.md
new file mode 100644
index 0000000..deb1e88
--- /dev/null
+++ b/vendor/github.com/tidwall/btree/README.md
@@ -0,0 +1,107 @@
+BTree implementation for Go
+===========================
+
+![Travis CI Build Status](https://api.travis-ci.org/tidwall/btree.svg?branch=master)
+[![GoDoc](https://godoc.org/github.com/tidwall/btree?status.svg)](https://godoc.org/github.com/tidwall/btree)
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+This is a fork of the wonderful [google/btree](https://github.com/google/btree) package. It has all the same great features and adds a few more.
+
+- Descend* functions for iterating backwards.
+- Iteration performance boost.
+- User defined context.
+
+User defined context
+--------------------
+This is a great new feature that allows the same item to be entered into multiple B-trees, with each B-tree using a different ordering formula.
+
+For example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tidwall/btree"
+)
+
+type Item struct {
+	Key, Val string
+}
+
+func (i1 *Item) Less(item btree.Item, ctx interface{}) bool {
+	i2 := item.(*Item)
+	switch tag := ctx.(type) {
+	case string:
+		if tag == "vals" {
+			if i1.Val < i2.Val {
+				return true
+			} else if i1.Val > i2.Val {
+				return false
+			}
+			// Both vals are equal so we should fall through
+			// and let the key comparison take over.
+		}
+	}
+	return i1.Key < i2.Key
+}
+
+func main() {
+	// Create a tree for keys and a tree for values.
+	// The "keys" tree will be sorted on the Keys field.
+	// The "values" tree will be sorted on the Values field.
+	keys := btree.New(16, "keys")
+	vals := btree.New(16, "vals")
+
+	// Create some items.
+	users := []*Item{
+		{Key: "user:1", Val: "Jane"},
+		{Key: "user:2", Val: "Andy"},
+		{Key: "user:3", Val: "Steve"},
+		{Key: "user:4", Val: "Andrea"},
+		{Key: "user:5", Val: "Janet"},
+		{Key: "user:6", Val: "Andy"},
+	}
+
+	// Insert each user into both trees.
+	for _, user := range users {
+		keys.ReplaceOrInsert(user)
+		vals.ReplaceOrInsert(user)
+	}
+
+	// Iterate over each user in the key tree.
+	keys.Ascend(func(item btree.Item) bool {
+		kvi := item.(*Item)
+		fmt.Printf("%s %s\n", kvi.Key, kvi.Val)
+		return true
+	})
+
+	fmt.Printf("\n")
+	// Iterate over each user in the val tree.
+	vals.Ascend(func(item btree.Item) bool {
+		kvi := item.(*Item)
+		fmt.Printf("%s %s\n", kvi.Key, kvi.Val)
+		return true
+	})
+}
+
+// You should see these results:
+/*
+user:1 Jane
+user:2 Andy
+user:3 Steve
+user:4 Andrea
+user:5 Janet
+user:6 Andy
+
+user:4 Andrea
+user:2 Andy
+user:6 Andy
+user:1 Jane
+user:3 Steve
+*/
+```
diff --git a/vendor/github.com/tidwall/btree/btree.go b/vendor/github.com/tidwall/btree/btree.go
new file mode 100644
index 0000000..26f0d23
--- /dev/null
+++ b/vendor/github.com/tidwall/btree/btree.go
@@ -0,0 +1,968 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+//     http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+//   * Due to the overhead of storing values as interfaces (each
+//     value needs to be stored as the value itself, then 2 words for the
+//     interface pointing to that value and its type), resulting in higher
+//     memory use.
+//   * Since interfaces can point to values anywhere in memory, values are
+//     most likely not stored in contiguous blocks, resulting in a higher
+//     number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement for gollrb.LLRB
+// trees (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+	// Less tests whether the current item is less than the given argument.
+	//
+	// This must provide a strict weak ordering.
+	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+	// hold one of either a or b in the tree).
+	//
+	// There is a user-defined ctx argument that is equal to the ctx value which
+	// is set at time of the btree construction.
+	Less(than Item, ctx interface{}) bool
+}
+
+const (
+	DefaultFreeListSize = 32
+)
+
+var (
+	nilItems    = make(items, 16)
+	nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two Btrees using the same freelist are safe for concurrent write access.
+type FreeList struct {
+	mu       sync.Mutex
+	freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+	return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+	f.mu.Lock()
+	index := len(f.freelist) - 1
+	if index < 0 {
+		f.mu.Unlock()
+		return new(node)
+	}
+	n = f.freelist[index]
+	f.freelist[index] = nil
+	f.freelist = f.freelist[:index]
+	f.mu.Unlock()
+	return
+}
+
+func (f *FreeList) freeNode(n *node) {
+	f.mu.Lock()
+	if len(f.freelist) < cap(f.freelist) {
+		f.freelist = append(f.freelist, n)
+	}
+	f.mu.Unlock()
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int, ctx interface{}) *BTree {
+	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize), ctx)
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList, ctx interface{}) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+		ctx:    ctx,
+	}
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item, ctx interface{}) (index int, found bool) {
+	i, j := 0, len(s)
+	for i < j {
+		h := i + (j-i)/2
+		if !item.Less(s[h], ctx) {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i > 0 && !s[i-1].Less(item, ctx) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+//   * len(children) == 0, len(items) unconstrained
+//   * len(children) == len(items) + 1
+type node struct {
+	items    items
+	children children
+	cow      *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+	if n.cow == cow {
+		return n
+	}
+	out := cow.newNode()
+	if cap(out.items) >= len(n.items) {
+		out.items = out.items[:len(n.items)]
+	} else {
+		out.items = make(items, len(n.items), cap(n.items))
+	}
+	copy(out.items, n.items)
+	// Copy children
+	if cap(out.children) >= len(n.children) {
+		out.children = out.children[:len(n.children)]
+	} else {
+		out.children = make(children, len(n.children), cap(n.children))
+	}
+	copy(out.children, n.children)
+	return out
+}
+
+func (n *node) mutableChild(i int) *node {
+	c := n.children[i].mutableFor(n.cow)
+	n.children[i] = c
+	return c
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+	item := n.items[i]
+	next := n.cow.newNode()
+	next.items = append(next.items, n.items[i+1:]...)
+	n.items.truncate(i)
+	if len(n.children) > 0 {
+		next.children = append(next.children, n.children[i+1:]...)
+		n.children.truncate(i + 1)
+	}
+	return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+	if len(n.children[i].items) < maxItems {
+		return false
+	}
+	first := n.mutableChild(i)
+	item, second := first.split(maxItems / 2)
+	n.items.insertAt(i, item)
+	n.children.insertAt(i+1, second)
+	return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items. Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int, ctx interface{}) Item {
+	i, found := n.items.find(item, ctx)
+	if found {
+		out := n.items[i]
+		n.items[i] = item
+		return out
+	}
+	if len(n.children) == 0 {
+		n.items.insertAt(i, item)
+		return nil
+	}
+	if n.maybeSplitChild(i, maxItems) {
+		inTree := n.items[i]
+		switch {
+		case item.Less(inTree, ctx):
+			// no change, we want first split node
+		case inTree.Less(item, ctx):
+			i++ // we want second split node
+		default:
+			out := n.items[i]
+			n.items[i] = item
+			return out
+		}
+	}
+	return n.mutableChild(i).insert(item, maxItems, ctx)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item, ctx interface{}) Item { + i, found := n.items.find(key, ctx) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key, ctx) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove, ctx interface{}) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item, ctx) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ, ctx) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax, ctx) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ, ctx) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove, ctx interface{}) Item {
+	if i > 0 && len(n.children[i-1].items) > minItems {
+		// Steal from left child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i - 1)
+		stolenItem := stealFrom.items.pop()
+		child.items.insertAt(0, n.items[i-1])
+		n.items[i-1] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children.insertAt(0, stealFrom.children.pop())
+		}
+	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+		// steal from right child
+		child := n.mutableChild(i)
+		stealFrom := n.mutableChild(i + 1)
+		stolenItem := stealFrom.items.removeAt(0)
+		child.items = append(child.items, n.items[i])
+		n.items[i] = stolenItem
+		if len(stealFrom.children) > 0 {
+			child.children = append(child.children, stealFrom.children.removeAt(0))
+		}
+	} else {
+		if i >= len(n.items) {
+			i--
+		}
+		child := n.mutableChild(i)
+		// merge with right child
+		mergeItem := n.items.removeAt(i)
+		mergeChild := n.children.removeAt(i + 1)
+		child.items = append(child.items, mergeItem)
+		child.items = append(child.items, mergeChild.items...)
+		child.children = append(child.children, mergeChild.children...)
+		n.cow.freeNode(mergeChild)
+	}
+	return n.remove(item, minItems, typ, ctx)
+}
+
+type direction int
+
+const (
+	descend = direction(-1)
+	ascend  = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating "greaterOrEqual" or "lessThanEqual" queries rather than just
+// "greaterThan" or "lessThan" queries.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator, ctx interface{}) (bool, bool) {
+	var ok bool
+	switch dir {
+	case ascend:
+		for i := 0; i < len(n.items); i++ {
+			if start != nil && n.items[i].Less(start, ctx) {
+				continue
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok {
+					return hit, false
+				}
+			}
+			if !includeStart && !hit && start != nil && !start.Less(n.items[i], ctx) {
+				hit = true
+				continue
+			}
+			hit = true
+			if stop != nil && !n.items[i].Less(stop, ctx) {
+				return hit, false
+			}
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok {
+				return hit, false
+			}
+		}
+	case descend:
+		for i := len(n.items) - 1; i >= 0; i-- {
+			if start != nil && !n.items[i].Less(start, ctx) {
+				if !includeStart || hit || start.Less(n.items[i], ctx) {
+					continue
+				}
+			}
+			if len(n.children) > 0 {
+				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok {
+					return hit, false
+				}
+			}
+			if stop != nil && !stop.Less(n.items[i], ctx) {
+				return hit, false // continue
+			}
+			hit = true
+			if !iter(n.items[i]) {
+				return hit, false
+			}
+		}
+		if len(n.children) > 0 {
+			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter, ctx); !ok {
+				return hit, false
+			}
+		}
+	}
+	return hit, true
+}
+
+// Used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
+	for _, c := range n.children {
+		c.print(w, level+1)
+	}
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+	degree int
+	length int
+	root   *node
+	ctx    interface{}
+	cow    *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+	freelist *FreeList
+}
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the original performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+	// Create two entirely new copy-on-write contexts.
+	// This operation effectively creates three trees:
+	//   the original, shared nodes (old t.cow)
+	//   the new t.cow nodes
+	//   the new out.cow nodes
+	cow1, cow2 := *t.cow, *t.cow
+	out := *t
+	t.cow = &cow1
+	out.cow = &cow2
+	return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+	return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+	return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+	n = c.freelist.newNode()
+	n.cow = c
+	return
+}
+
+func (c *copyOnWriteContext) freeNode(n *node) {
+	if n.cow == c {
+		// clear to allow GC
+		n.items.truncate(0)
+		n.children.truncate(0)
+		n.cow = nil
+		c.freelist.freeNode(n)
+	}
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems(), t.ctx) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem, t.ctx) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin, t.ctx) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax, t.ctx) +} + +func (t *BTree) deleteItem(item Item, typ toRemove, ctx interface{}) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ, ctx) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator, t.ctx) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator, t.ctx) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator, t.ctx) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator, t.ctx) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. +func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator, t.ctx) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. 
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, pivot, nil, true, false, iterator, t.ctx)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range (pivot, last], until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, pivot, false, false, iterator, t.ctx)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+	if t.root == nil {
+		return
+	}
+	t.root.iterate(descend, nil, nil, false, false, iterator, t.ctx)
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+	if t.root == nil {
+		return nil
+	}
+	return t.root.get(key, t.ctx)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+	return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+	return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+	return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+	return t.length
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item, ctx interface{}) bool {
+	return a < b.(Int)
+}
+
+type stackItem struct {
+	n *node // current node
+	i int   // index of the next child/item.
+}
+
+// Cursor represents an iterator that can traverse over all items in the tree
+// in sorted order.
+//
+// Changing data while traversing a cursor may result in unexpected items being
+// returned. You must reposition your cursor after mutating data.
+type Cursor struct {
+	t     *BTree
+	stack []stackItem
+}
+
+// Cursor returns a new cursor used to traverse over items in the tree.
+func (t *BTree) Cursor() *Cursor {
+	return &Cursor{t: t}
+}
+
+// First moves the cursor to the first item in the tree and returns that item.
+func (c *Cursor) First() Item {
+	c.stack = c.stack[:0]
+	n := c.t.root
+	if n == nil {
+		return nil
+	}
+	c.stack = append(c.stack, stackItem{n: n})
+	for len(n.children) > 0 {
+		n = n.children[0]
+		c.stack = append(c.stack, stackItem{n: n})
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[0]
+}
+
+// Next moves the cursor to the next item and returns that item.
+func (c *Cursor) Next() Item {
+	if len(c.stack) == 0 {
+		return nil
+	}
+	si := len(c.stack) - 1
+	c.stack[si].i++
+	n := c.stack[si].n
+	i := c.stack[si].i
+	if i == len(n.children)+len(n.items) {
+		c.stack = c.stack[:len(c.stack)-1]
+		return c.Next()
+	}
+	if len(n.children) == 0 {
+		if i >= len(n.items) {
+			c.stack = c.stack[:len(c.stack)-1]
+			return c.Next()
+		}
+		return n.items[i]
+	} else if i%2 == 1 {
+		return n.items[i/2]
+	}
+	c.stack = append(c.stack, stackItem{n: n.children[i/2], i: -1})
+	return c.Next()
+}
+
+// Last moves the cursor to the last item in the tree and returns that item.
+func (c *Cursor) Last() Item {
+	c.stack = c.stack[:0]
+	n := c.t.root
+	if n == nil {
+		return nil
+	}
+	c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1})
+	for len(n.children) > 0 {
+		n = n.children[len(n.children)-1]
+		c.stack = append(c.stack, stackItem{n: n, i: len(n.children) + len(n.items) - 1})
+	}
+	if len(n.items) == 0 {
+		return nil
+	}
+	return n.items[len(n.items)-1]
+}
+
+// Prev moves the cursor to the previous item and returns that item.
+func (c *Cursor) Prev() Item {
+	if len(c.stack) == 0 {
+		return nil
+	}
+	si := len(c.stack) - 1
+	c.stack[si].i--
+	n := c.stack[si].n
+	i := c.stack[si].i
+	if i == -1 {
+		c.stack = c.stack[:len(c.stack)-1]
+		return c.Prev()
+	}
+	if len(n.children) == 0 {
+		return n.items[i]
+	} else if i%2 == 1 {
+		return n.items[i/2]
+	}
+	child := n.children[i/2]
+	c.stack = append(c.stack, stackItem{n: child,
+		i: len(child.children) + len(child.items)})
+	return c.Prev()
+}
+
+// Seek moves the cursor to the provided item and returns that item.
+// If the item does not exist then the next item is returned.
+func (c *Cursor) Seek(pivot Item) Item {
+	c.stack = c.stack[:0]
+	n := c.t.root
+	for n != nil {
+		i, found := n.items.find(pivot, c.t.ctx)
+		c.stack = append(c.stack, stackItem{n: n})
+		if found {
+			if len(n.children) == 0 {
+				c.stack[len(c.stack)-1].i = i
+			} else {
+				c.stack[len(c.stack)-1].i = i*2 + 1
+			}
+			return n.items[i]
+		}
+		if len(n.children) == 0 {
+			if i == len(n.items) {
+				c.stack[len(c.stack)-1].i = i + 1
+				return c.Next()
+			}
+			c.stack[len(c.stack)-1].i = i
+			return n.items[i]
+		}
+		c.stack[len(c.stack)-1].i = i * 2
+		n = n.children[i]
+	}
+	return nil
+}
diff --git a/vendor/github.com/tidwall/buntdb/.travis.yml b/vendor/github.com/tidwall/buntdb/.travis.yml
new file mode 100644
index 0000000..4f2ee4d
--- /dev/null
+++ b/vendor/github.com/tidwall/buntdb/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/tidwall/buntdb/LICENSE b/vendor/github.com/tidwall/buntdb/LICENSE
new file mode 100644
index 0000000..58f5819
--- /dev/null
+++ b/vendor/github.com/tidwall/buntdb/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/buntdb/README.md b/vendor/github.com/tidwall/buntdb/README.md
new file mode 100644
index 0000000..3179809
--- /dev/null
+++ b/vendor/github.com/tidwall/buntdb/README.md
@@ -0,0 +1,634 @@
+

+BuntDB
+======
+
+(badges: Build Status, Code Coverage, Go Report Card, GoDoc)

+
+BuntDB is a low-level, in-memory, key/value store in pure Go.
+It persists to disk, is ACID compliant, and uses locking for multiple
+readers and a single writer. It supports custom indexes and geospatial
+data. It's ideal for projects that need a dependable database and favor
+speed over data size.
+
+Features
+========
+
+- In-memory database for [fast reads and writes](#performance)
+- Embeddable with a [simple API](https://godoc.org/github.com/tidwall/buntdb)
+- [Spatial indexing](#spatial-indexes) for up to 20 dimensions; useful for geospatial data
+- Index fields inside [JSON](#json-indexes) documents
+- [Collate i18n Indexes](#collate-i18n-indexes) using the optional [collate package](https://github.com/tidwall/collate)
+- Create [custom indexes](#custom-indexes) for any data type
+- Support for [multi value indexes](#multi-value-index); similar to a SQL multi column index
+- [Built-in types](#built-in-types) that are easy to get up & running; String, Uint, Int, Float
+- Flexible [iteration](#iterating) of data; ascending, descending, and ranges
+- [Durable append-only file](#append-only-file) format for persistence
+- Option to evict old items with an [expiration](#data-expiration) TTL
+- Tight codebase, under 2K loc using the `cloc` command
+- ACID semantics with locking [transactions](#transactions) that support rollbacks
+
+Getting Started
+===============
+
+## Installing
+
+To start using BuntDB, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/buntdb
+```
+
+This will retrieve the library.
+
+## Opening a database
+
+The primary object in BuntDB is a `DB`. To open or create your
+database, use the `buntdb.Open()` function:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/tidwall/buntdb"
+)
+
+func main() {
+	// Open the data.db file. It will be created if it doesn't exist.
+	db, err := buntdb.Open("data.db")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	...
+}
+```
+
+It's also possible to open a database that does not persist to disk by using `:memory:` as the path of the file.
+
+```go
+buntdb.Open(":memory:") // Open a file that does not persist to disk.
+```
+
+## Transactions
+All reads and writes must be performed from inside a transaction. BuntDB can have one write transaction opened at a time, but can have many concurrent read transactions. Each transaction maintains a stable view of the database. In other words, once a transaction has begun, the data for that transaction cannot be changed by other transactions.
+
+Transactions run in a function that exposes a `Tx` object, which represents the transaction state. While inside a transaction, all database operations should be performed using this object. You should never access the original `DB` object while inside a transaction. Doing so may have side-effects, such as blocking your application.
+
+When a transaction fails, it will roll back and revert all changes that occurred to the database during that transaction. The transaction function has a single error return value that closes the transaction. For read/write transactions, returning a non-nil error will force the transaction to roll back. When a read/write transaction succeeds all changes are persisted to disk.
+
+### Read-only Transactions
+A read-only transaction should be used when you don't need to make changes to the data. The advantage of a read-only transaction is that there can be many running concurrently.
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+	...
+	return nil
+})
+```
+
+### Read/write Transactions
+A read/write transaction is used when you need to make changes to your data. There can only be one read/write transaction running at a time, so make sure you close it as soon as you are done with it.
+
+```go
+err := db.Update(func(tx *buntdb.Tx) error {
+	...
+	return nil
+})
+```
+
+## Setting and getting key/values
+
+To set a value you must open a read/write transaction:
+
+```go
+err := db.Update(func(tx *buntdb.Tx) error {
+	_, _, err := tx.Set("mykey", "myvalue", nil)
+	return err
+})
+```
+
+To get the value:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+	val, err := tx.Get("mykey")
+	if err != nil {
+		return err
+	}
+	fmt.Printf("value is %s\n", val)
+	return nil
+})
+```
+
+Getting a non-existent value will return an `ErrNotFound` error.
+
+### Iterating
+All key/value pairs are ordered in the database by the key. To iterate over the keys:
+
+```go
+err := db.View(func(tx *buntdb.Tx) error {
+	err := tx.Ascend("", func(key, value string) bool {
+		fmt.Printf("key: %s, value: %s\n", key, value)
+		return true
+	})
+	return err
+})
+```
+
+There are also `AscendGreaterOrEqual`, `AscendLessThan`, `AscendRange`, `AscendEqual`, `Descend`, `DescendLessOrEqual`, `DescendGreaterThan`, `DescendRange`, and `DescendEqual`. Please see the [documentation](https://godoc.org/github.com/tidwall/buntdb) for more information on these functions.
+
+## Custom Indexes
+Initially all data is stored in a single [B-tree](https://en.wikipedia.org/wiki/B-tree) with each item having one key and one value. All of these items are ordered by the key. This is great for quickly getting a value from a key or [iterating](#iterating) over the keys. Feel free to peruse the [B-tree implementation](https://github.com/tidwall/btree).
+
+You can also create custom indexes that allow for ordering and [iterating](#iterating) over values. A custom index also uses a B-tree, but it's more flexible because it allows for custom ordering.
+
+For example, let's say you want to create an index for ordering names:
+
+```go
+db.CreateIndex("names", "*", buntdb.IndexString)
+```
+
+This will create an index named `names` which stores and sorts all values. The second parameter is a pattern that is used to filter on keys. A `*` wildcard argument means that we want to accept all keys. `IndexString` is a built-in function that performs case-insensitive ordering on the values.
+
+Now you can add various names:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("user:0:name", "tom", nil)
+	tx.Set("user:1:name", "Randi", nil)
+	tx.Set("user:2:name", "jane", nil)
+	tx.Set("user:4:name", "Janet", nil)
+	tx.Set("user:5:name", "Paula", nil)
+	tx.Set("user:6:name", "peter", nil)
+	tx.Set("user:7:name", "Terri", nil)
+	return nil
+})
+```
+
+Finally you can iterate over the index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Ascend("names", func(key, val string) bool {
+		fmt.Printf("%s %s\n", key, val)
+		return true
+	})
+	return nil
+})
+```
+
+The output should be:
+```
+user:2:name jane
+user:4:name Janet
+user:5:name Paula
+user:6:name peter
+user:1:name Randi
+user:7:name Terri
+user:0:name tom
+```
+
+The pattern parameter can be used to filter on keys like this:
+
+```go
+db.CreateIndex("names", "user:*", buntdb.IndexString)
+```
+
+Now only items with keys that have the prefix `user:` will be added to the `names` index.
+
+### Built-in types
+Along with `IndexString`, there are also `IndexInt`, `IndexUint`, and `IndexFloat`.
+These are built-in types for indexing. You can choose to use these or create your own.
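+
+For example, here is a minimal sketch of a custom ordering function; the
+index name `by_length` and the length-based ordering are illustrative
+assumptions, not part of the library:
+
+```go
+// Sort values by their length, shortest first.
+db.CreateIndex("by_length", "*", func(a, b string) bool {
+	return len(a) < len(b)
+})
+```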
+
+To create an index that is numerically ordered on an age key, we could use:
+
+```go
+db.CreateIndex("ages", "user:*:age", buntdb.IndexInt)
+```
+
+And then add values:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("user:0:age", "35", nil)
+	tx.Set("user:1:age", "49", nil)
+	tx.Set("user:2:age", "13", nil)
+	tx.Set("user:4:age", "63", nil)
+	tx.Set("user:5:age", "8", nil)
+	tx.Set("user:6:age", "3", nil)
+	tx.Set("user:7:age", "16", nil)
+	return nil
+})
+```
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Ascend("ages", func(key, val string) bool {
+		fmt.Printf("%s %s\n", key, val)
+		return true
+	})
+	return nil
+})
+```
+
+The output should be:
+```
+user:6:age 3
+user:5:age 8
+user:2:age 13
+user:7:age 16
+user:0:age 35
+user:1:age 49
+user:4:age 63
+```
+
+## Spatial Indexes
+BuntDB has support for spatial indexes by storing rectangles in an [R-tree](https://en.wikipedia.org/wiki/R-tree). An R-tree is organized in a manner similar to a [B-tree](https://en.wikipedia.org/wiki/B-tree), and both are balanced trees. But an R-tree is special because it can operate on data that is in multiple dimensions. This is super handy for geospatial applications.
+
+To create a spatial index use the `CreateSpatialIndex` function:
+
+```go
+db.CreateSpatialIndex("fleet", "fleet:*:pos", buntdb.IndexRect)
+```
+
+`IndexRect` is a built-in function that converts rect strings to a format that the R-tree can use. It's easy to use this function out of the box, but you might find it better to create a custom one that renders from a different format, such as [Well-known text](https://en.wikipedia.org/wiki/Well-known_text) or [GeoJSON](http://geojson.org/).
+
+To add some lon,lat points to the `fleet` index:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("fleet:0:pos", "[-115.567 33.532]", nil)
+	tx.Set("fleet:1:pos", "[-116.671 35.735]", nil)
+	tx.Set("fleet:2:pos", "[-113.902 31.234]", nil)
+	return nil
+})
+```
+
+And then you can run the `Intersects` function on the index:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Intersects("fleet", "[-117 30],[-112 36]", func(key, val string) bool {
+		...
+		return true
+	})
+	return nil
+})
+```
+
+This will get all three positions.
+
+### k-Nearest Neighbors
+
+Use the `Nearby` function to get all the positions in order of nearest to farthest:
+
+```go
+db.View(func(tx *buntdb.Tx) error {
+	tx.Nearby("fleet", "[-113 33]", func(key, val string, dist float64) bool {
+		...
+		return true
+	})
+	return nil
+})
+```
+
+### Spatial bracket syntax
+
+The bracket syntax `[-117 30],[-112 36]` is unique to BuntDB, and it's how the built-in rectangles are processed. But you are not limited to this syntax. Whatever Rect function you choose to use during `CreateSpatialIndex` will be used to process the parameter, in this case it's `IndexRect`.
+
+- **2D rectangle:** `[10 15],[20 25]`
+*Min XY: "10x15", Max XY: "20x25"*
+
+- **3D rectangle:** `[10 15 12],[20 25 18]`
+*Min XYZ: "10x15x12", Max XYZ: "20x25x18"*
+
+- **2D point:** `[10 15]`
+*XY: "10x15"*
+
+- **LonLat point:** `[-112.2693 33.5123]`
+*LatLon: "33.5123 -112.2693"*
+
+- **LonLat bounding box:** `[-112.26 33.51],[-112.18 33.67]`
+*Min LatLon: "33.51 -112.26", Max LatLon: "33.67 -112.18"*
+
+**Notice:** In the bracket syntax the longitude (the X axis) comes first, on the left, and the latitude (the Y axis) comes second, on the right.
+
+You can also represent `Infinity` by using `-inf` and `+inf`.
+You can also represent `Infinity` by using `-inf` and `+inf`.
+For example, you might have the following points (`[X Y M]` where XY is a point and M is a timestamp):
+```
+[3 9 1]
+[3 8 2]
+[4 8 3]
+[4 7 4]
+[5 7 5]
+[5 6 6]
+```
+
+You can then do a search for all points with `M` between 2 and 4 by calling `Intersects`:
+
+```go
+tx.Intersects("points", "[-inf -inf 2],[+inf +inf 4]", func(key, val string) bool {
+	println(val)
+	return true
+})
+```
+
+This will return:
+
+```
+[3 8 2]
+[4 8 3]
+[4 7 4]
+```
+
+## JSON Indexes
+Indexes can be created on individual fields inside JSON documents. BuntDB uses [GJSON](https://github.com/tidwall/gjson) under the hood.
+
+For example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tidwall/buntdb"
+)
+
+func main() {
+	db, _ := buntdb.Open(":memory:")
+	db.CreateIndex("last_name", "*", buntdb.IndexJSON("name.last"))
+	db.CreateIndex("age", "*", buntdb.IndexJSON("age"))
+	db.Update(func(tx *buntdb.Tx) error {
+		tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil)
+		tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil)
+		tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil)
+		tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil)
+		return nil
+	})
+	db.View(func(tx *buntdb.Tx) error {
+		fmt.Println("Order by last name")
+		tx.Ascend("last_name", func(key, value string) bool {
+			fmt.Printf("%s: %s\n", key, value)
+			return true
+		})
+		fmt.Println("Order by age")
+		tx.Ascend("age", func(key, value string) bool {
+			fmt.Printf("%s: %s\n", key, value)
+			return true
+		})
+		fmt.Println("Order by age range 30-50")
+		tx.AscendRange("age", `{"age":30}`, `{"age":50}`, func(key, value string) bool {
+			fmt.Printf("%s: %s\n", key, value)
+			return true
+		})
+		return nil
+	})
+}
+```
+
+Results:
+
+```
+Order by last name
+3: {"name":{"first":"Carol","last":"Anderson"},"age":52}
+4: {"name":{"first":"Alan","last":"Cooper"},"age":28}
+1: {"name":{"first":"Tom","last":"Johnson"},"age":38}
+2: {"name":{"first":"Janet","last":"Prichard"},"age":47}
+
+Order by age
+4: {"name":{"first":"Alan","last":"Cooper"},"age":28}
+1: {"name":{"first":"Tom","last":"Johnson"},"age":38}
+2: {"name":{"first":"Janet","last":"Prichard"},"age":47}
+3: {"name":{"first":"Carol","last":"Anderson"},"age":52}
+
+Order by age range 30-50
+1: {"name":{"first":"Tom","last":"Johnson"},"age":38}
+2: {"name":{"first":"Janet","last":"Prichard"},"age":47}
+```
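+
+Because a JSON index orders items by the extracted field, exact matches can be found with `AscendEqual` by passing a pivot document that carries just the indexed path. A minimal sketch building on the example above (the pivot shape is illustrative; only the `name.last` field is consulted):
+
+```go
+// Find everyone whose last name is exactly "Prichard" via the JSON index.
+db.View(func(tx *buntdb.Tx) error {
+	tx.AscendEqual("last_name", `{"name":{"last":"Prichard"}}`, func(key, value string) bool {
+		fmt.Printf("%s: %s\n", key, value)
+		return true
+	})
+	return nil
+})
+```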
+
+## Multi Value Index
+With BuntDB it's possible to join multiple values on a single index.
+This is similar to a [multi column index](http://dev.mysql.com/doc/refman/5.7/en/multiple-column-indexes.html) in a traditional SQL database.
+
+In this example we are creating a multi value index on "name.last" and "age":
+
+```go
+db, _ := buntdb.Open(":memory:")
+db.CreateIndex("last_name_age", "*", buntdb.IndexJSON("name.last"), buntdb.IndexJSON("age"))
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("1", `{"name":{"first":"Tom","last":"Johnson"},"age":38}`, nil)
+	tx.Set("2", `{"name":{"first":"Janet","last":"Prichard"},"age":47}`, nil)
+	tx.Set("3", `{"name":{"first":"Carol","last":"Anderson"},"age":52}`, nil)
+	tx.Set("4", `{"name":{"first":"Alan","last":"Cooper"},"age":28}`, nil)
+	tx.Set("5", `{"name":{"first":"Sam","last":"Anderson"},"age":51}`, nil)
+	tx.Set("6", `{"name":{"first":"Melinda","last":"Prichard"},"age":44}`, nil)
+	return nil
+})
+db.View(func(tx *buntdb.Tx) error {
+	tx.Ascend("last_name_age", func(key, value string) bool {
+		fmt.Printf("%s: %s\n", key, value)
+		return true
+	})
+	return nil
+})
+
+// Output:
+// 5: {"name":{"first":"Sam","last":"Anderson"},"age":51}
+// 3: {"name":{"first":"Carol","last":"Anderson"},"age":52}
+// 4: {"name":{"first":"Alan","last":"Cooper"},"age":28}
+// 1: {"name":{"first":"Tom","last":"Johnson"},"age":38}
+// 6: {"name":{"first":"Melinda","last":"Prichard"},"age":44}
+// 2: {"name":{"first":"Janet","last":"Prichard"},"age":47}
+```
+
+## Descending Ordered Index
+Any index can be put in descending order by wrapping its less function with `buntdb.Desc`:
+
+```go
+db.CreateIndex("last_name_age", "*",
+	buntdb.IndexJSON("name.last"),
+	buntdb.Desc(buntdb.IndexJSON("age")))
+```
+
+This will create a multi value index where the last name is ascending and the age is descending.
+
+## Collate i18n Indexes
+
+Using the external [collate package](https://github.com/tidwall/collate) it's possible to create
+indexes that are sorted by the specified language. This is similar to the [SQL COLLATE keyword](https://msdn.microsoft.com/en-us/library/ms174596.aspx) found in traditional databases.
+
+To install:
+
+```
+go get -u github.com/tidwall/collate
+```
+
+For example:
+
+```go
+import "github.com/tidwall/collate"
+
+// To sort case-insensitive in French.
+db.CreateIndex("name", "*", collate.IndexString("FRENCH_CI"))
+
+// To specify that numbers should sort numerically ("2" < "12")
+// and use a comma to represent a decimal point.
+db.CreateIndex("amount", "*", collate.IndexString("FRENCH_NUM"))
+```
+
+There's also support for collation on JSON indexes:
+
+```go
+db.CreateIndex("last_name", "*", collate.IndexJSON("CHINESE_CI", "name.last"))
+```
+
+Check out the [collate project](https://github.com/tidwall/collate) for more information.
+
+## Data Expiration
+Items can be automatically evicted by using the `SetOptions` object in the `Set` function to set a `TTL`:
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	tx.Set("mykey", "myval", &buntdb.SetOptions{Expires: true, TTL: time.Second})
+	return nil
+})
+```
+
+Now `mykey` will automatically be deleted after one second. You can remove the TTL by setting the value again with the same key/value, but with the options parameter set to nil.
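+
+A minimal sketch of clearing the TTL this way, re-reading the current value inside the same transaction (the key name is carried over from the example above):
+
+```go
+db.Update(func(tx *buntdb.Tx) error {
+	val, err := tx.Get("mykey")
+	if err != nil {
+		return err
+	}
+	// Re-setting the same key/value with nil options removes the expiration.
+	_, _, err = tx.Set("mykey", val, nil)
+	return err
+})
+```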
+
+## Delete while iterating
+BuntDB does not currently support deleting a key while in the process of iterating.
+As a workaround you'll need to delete keys following the completion of the iterator:
+
+```go
+var delkeys []string
+tx.AscendKeys("object:*", func(k, v string) bool {
+	if someCondition(k) {
+		delkeys = append(delkeys, k)
+	}
+	return true // continue
+})
+for _, k := range delkeys {
+	if _, err = tx.Delete(k); err != nil {
+		return err
+	}
+}
+```
+
+## Append-only File
+
+BuntDB uses an AOF (append-only file) which is a log of all database changes that occur from operations like `Set()` and `Delete()`.
+
+The format of this file looks like:
+```
+set key:1 value1
+set key:2 value2
+set key:1 value3
+del key:2
+...
+```
+
+When the database opens again, it will read back the aof file and process each command in the exact order it was written.
+This read process happens one time when the database opens.
+From there on the file is only appended.
+
+As you may guess, this log file can grow large over time.
+There's a background routine that automatically shrinks the log file when it gets too large.
+There is also a `Shrink()` function which will rewrite the aof file so that it contains only the items in the database.
+The shrink operation does not lock up the database, so read and write transactions can continue while shrinking is in process.
+
+### Durability and fsync
+
+By default BuntDB executes an `fsync` once every second on the [aof file](#append-only-file), which means that up to one second of data might be lost in a crash. If you need higher durability, there's an optional database config setting `Config.SyncPolicy` which can be set to `Always`.
+
+The `Config.SyncPolicy` has the following options:
+
+- `Never` - fsync is managed by the operating system, less safe
+- `EverySecond` - fsync every second, fast and safer, this is the default
+- `Always` - fsync after every write, very durable, slower
+
+## Config
+
+Here are some configuration options that can be used to change various behaviors of the database.
+
+- **SyncPolicy** adjusts how often the data is synced to disk. This value can be Never, EverySecond, or Always. Default is EverySecond.
+- **AutoShrinkPercentage** is used by the background process to trigger a shrink of the aof file when the size of the file is larger than the percentage of the result of the previous shrunk file. For example, if this value is 100, and the last shrink process resulted in a 100mb file, then the new aof file must be 200mb before a shrink is triggered. Default is 100.
+- **AutoShrinkMinSize** defines the minimum size of the aof file before an automatic shrink can occur. Default is 32MB.
+- **AutoShrinkDisabled** turns off automatic background shrinking. Default is false.
+
+To update the configuration you should call `ReadConfig` followed by `SetConfig`. For example:
+
+```go
+var config buntdb.Config
+if err := db.ReadConfig(&config); err != nil {
+	log.Fatal(err)
+}
+if err := db.SetConfig(config); err != nil {
+	log.Fatal(err)
+}
+```
+
+## Performance
+
+How fast is BuntDB?
+
+Here are some example [benchmarks](https://github.com/tidwall/raft-buntdb#raftstore-performance-comparison) when using BuntDB in a Raft Store implementation.
+
+You can also run the standard Go benchmark tool from the project root directory:
+
+```
+go test --bench=.
+```
+
+### BuntDB-Benchmark
+
+There's a [custom utility](https://github.com/tidwall/buntdb-benchmark) that was created specifically for benchmarking BuntDB.
+ +*These are the results from running the benchmarks on a MacBook Pro 15" 2.8 GHz Intel Core i7:* + +``` +$ buntdb-benchmark -q +GET: 4609604.74 operations per second +SET: 248500.33 operations per second +ASCEND_100: 2268998.79 operations per second +ASCEND_200: 1178388.14 operations per second +ASCEND_400: 679134.20 operations per second +ASCEND_800: 348445.55 operations per second +DESCEND_100: 2313821.69 operations per second +DESCEND_200: 1292738.38 operations per second +DESCEND_400: 675258.76 operations per second +DESCEND_800: 337481.67 operations per second +SPATIAL_SET: 134824.60 operations per second +SPATIAL_INTERSECTS_100: 939491.47 operations per second +SPATIAL_INTERSECTS_200: 561590.40 operations per second +SPATIAL_INTERSECTS_400: 306951.15 operations per second +SPATIAL_INTERSECTS_800: 159673.91 operations per second +``` + +To install this utility: + +``` +go get github.com/tidwall/buntdb-benchmark +``` + + + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +BuntDB source code is available under the MIT [License](/LICENSE). diff --git a/vendor/github.com/tidwall/buntdb/buntdb.go b/vendor/github.com/tidwall/buntdb/buntdb.go new file mode 100644 index 0000000..35f8520 --- /dev/null +++ b/vendor/github.com/tidwall/buntdb/buntdb.go @@ -0,0 +1,2195 @@ +// Package buntdb implements a low-level in-memory key/value store in pure Go. +// It persists to disk, is ACID compliant, and uses locking for multiple +// readers and a single writer. Bunt is ideal for projects that need +// a dependable database, and favor speed over data size. +package buntdb + +import ( + "bufio" + "errors" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/tidwall/btree" + "github.com/tidwall/gjson" + "github.com/tidwall/grect" + "github.com/tidwall/match" + "github.com/tidwall/rtree" +) + +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrNotFound is returned when an item or index is not in the database. + ErrNotFound = errors.New("not found") + + // ErrInvalid is returned when the database file is an invalid format. + ErrInvalid = errors.New("invalid database") + + // ErrDatabaseClosed is returned when the database is closed. + ErrDatabaseClosed = errors.New("database closed") + + // ErrIndexExists is returned when an index already exists in the database. + ErrIndexExists = errors.New("index exists") + + // ErrInvalidOperation is returned when an operation cannot be completed. + ErrInvalidOperation = errors.New("invalid operation") + + // ErrInvalidSyncPolicy is returned for an invalid SyncPolicy value. + ErrInvalidSyncPolicy = errors.New("invalid sync policy") + + // ErrShrinkInProcess is returned when a shrink operation is in-process. + ErrShrinkInProcess = errors.New("shrink is in-process") + + // ErrPersistenceActive is returned when post-loading data from an database + // not opened with Open(":memory:"). + ErrPersistenceActive = errors.New("persistence active") + + // ErrTxIterating is returned when Set or Delete are called while iterating. + ErrTxIterating = errors.New("tx is iterating") +) + +// DB represents a collection of key-value pairs that persist on disk. +// Transactions are used for all forms of data access to the DB. 
+type DB struct { + mu sync.RWMutex // the gatekeeper for all fields + file *os.File // the underlying file + buf []byte // a buffer to write to + keys *btree.BTree // a tree of all item ordered by key + exps *btree.BTree // a tree of items ordered by expiration + idxs map[string]*index // the index trees. + exmgr bool // indicates that expires manager is running. + flushes int // a count of the number of disk flushes + closed bool // set when the database has been closed + config Config // the database configuration + persist bool // do we write to disk + shrinking bool // when an aof shrink is in-process. + lastaofsz int // the size of the last shrink aof size +} + +// SyncPolicy represents how often data is synced to disk. +type SyncPolicy int + +const ( + // Never is used to disable syncing data to disk. + // The faster and less safe method. + Never SyncPolicy = 0 + // EverySecond is used to sync data to disk every second. + // It's pretty fast and you can lose 1 second of data if there + // is a disaster. + // This is the recommended setting. + EverySecond = 1 + // Always is used to sync data after every write to disk. + // Slow. Very safe. + Always = 2 +) + +// Config represents database configuration options. These +// options are used to change various behaviors of the database. +type Config struct { + // SyncPolicy adjusts how often the data is synced to disk. + // This value can be Never, EverySecond, or Always. + // The default is EverySecond. + SyncPolicy SyncPolicy + + // AutoShrinkPercentage is used by the background process to trigger + // a shrink of the aof file when the size of the file is larger than the + // percentage of the result of the previous shrunk file. + // For example, if this value is 100, and the last shrink process + // resulted in a 100mb file, then the new aof file must be 200mb before + // a shrink is triggered. + AutoShrinkPercentage int + + // AutoShrinkMinSize defines the minimum size of the aof file before + // an automatic shrink can occur. + AutoShrinkMinSize int + + // AutoShrinkDisabled turns off automatic background shrinking + AutoShrinkDisabled bool + + // OnExpired is used to custom handle the deletion option when a key + // has been expired. + OnExpired func(keys []string) + + // OnExpiredSync will be called inside the same transaction that is performing + // the deletion of expired items. If OnExpired is present then this callback + // will not be called. If this callback is present, then the deletion of the + // timeed-out item is the explicit responsibility of this callback. + OnExpiredSync func(key, value string, tx *Tx) error +} + +// exctx is a simple b-tree context for ordering by expiration. +type exctx struct { + db *DB +} + +// Default number of btree degrees +const btreeDegrees = 64 + +// Open opens a database at the provided path. +// If the file does not exist then it will be created automatically. +func Open(path string) (*DB, error) { + db := &DB{} + // initialize trees and indexes + db.keys = btree.New(btreeDegrees, nil) + db.exps = btree.New(btreeDegrees, &exctx{db}) + db.idxs = make(map[string]*index) + // initialize default configuration + db.config = Config{ + SyncPolicy: EverySecond, + AutoShrinkPercentage: 100, + AutoShrinkMinSize: 32 * 1024 * 1024, + } + // turn off persistence for pure in-memory + db.persist = path != ":memory:" + if db.persist { + var err error + // hardcoding 0666 as the default mode. 
+ db.file, err = os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return nil, err + } + // load the database from disk + if err := db.load(); err != nil { + // close on error, ignore close error + _ = db.file.Close() + return nil, err + } + } + // start the background manager. + go db.backgroundManager() + return db, nil +} + +// Close releases all database resources. +// All transactions must be closed before closing the database. +func (db *DB) Close() error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + db.closed = true + if db.persist { + db.file.Sync() // do a sync but ignore the error + if err := db.file.Close(); err != nil { + return err + } + } + // Let's release all references to nil. This will help both with debugging + // late usage panics and it provides a hint to the garbage collector + db.keys, db.exps, db.idxs, db.file = nil, nil, nil, nil + return nil +} + +// Save writes a snapshot of the database to a writer. This operation blocks all +// writes, but not reads. This can be used for snapshots and backups for pure +// in-memory databases using the ":memory:". Database that persist to disk +// can be snapshotted by simply copying the database file. +func (db *DB) Save(wr io.Writer) error { + var err error + db.mu.RLock() + defer db.mu.RUnlock() + // use a buffered writer and flush every 4MB + var buf []byte + // iterated through every item in the database and write to the buffer + db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + buf = dbi.writeSetTo(buf) + if len(buf) > 1024*1024*4 { + // flush when buffer is over 4MB + _, err = wr.Write(buf) + if err != nil { + return false + } + buf = buf[:0] + } + return true + }) + if err != nil { + return err + } + // one final flush + if len(buf) > 0 { + _, err = wr.Write(buf) + if err != nil { + return err + } + } + return nil +} + +// Load loads commands from reader. This operation blocks all reads and writes. +// Note that this can only work for fully in-memory databases opened with +// Open(":memory:"). +func (db *DB) Load(rd io.Reader) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist { + // cannot load into databases that persist to disk + return ErrPersistenceActive + } + return db.readLoad(rd, time.Now()) +} + +// index represents a b-tree or r-tree index and also acts as the +// b-tree/r-tree context for itself. +type index struct { + btr *btree.BTree // contains the items + rtr *rtree.RTree // contains the items + name string // name of the index + pattern string // a required key pattern + less func(a, b string) bool // less comparison function + rect func(item string) (min, max []float64) // rect from string function + db *DB // the origin database + opts IndexOptions // index options +} + +// match matches the pattern to the key +func (idx *index) match(key string) bool { + if idx.pattern == "*" { + return true + } + if idx.opts.CaseInsensitiveKeyMatching { + for i := 0; i < len(key); i++ { + if key[i] >= 'A' && key[i] <= 'Z' { + key = strings.ToLower(key) + break + } + } + } + return match.Match(key, idx.pattern) +} + +// clearCopy creates a copy of the index, but with an empty dataset. 
+func (idx *index) clearCopy() *index { + // copy the index meta information + nidx := &index{ + name: idx.name, + pattern: idx.pattern, + db: idx.db, + less: idx.less, + rect: idx.rect, + opts: idx.opts, + } + // initialize with empty trees + if nidx.less != nil { + nidx.btr = btree.New(btreeDegrees, nidx) + } + if nidx.rect != nil { + nidx.rtr = rtree.New(nidx) + } + return nidx +} + +// rebuild rebuilds the index +func (idx *index) rebuild() { + // initialize trees + if idx.less != nil { + idx.btr = btree.New(btreeDegrees, idx) + } + if idx.rect != nil { + idx.rtr = rtree.New(idx) + } + // iterate through all keys and fill the index + idx.db.keys.Ascend(func(item btree.Item) bool { + dbi := item.(*dbItem) + if !idx.match(dbi.key) { + // does not match the pattern, conintue + return true + } + if idx.less != nil { + idx.btr.ReplaceOrInsert(dbi) + } + if idx.rect != nil { + idx.rtr.Insert(dbi) + } + return true + }) +} + +// CreateIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// An error will occur if an index with the same name already exists. +// +// When a pattern is provided, the index will be populated with +// keys that match the specified pattern. This is a very simple pattern +// match where '*' matches on any number characters and '?' matches on +// any one character. +// The less function compares if string 'a' is less than string 'b'. +// It allows for indexes to create custom ordering. It's possible +// that the strings may be textual or binary. It's up to the provided +// less function to handle the content format and comparison. +// There are some default less function that can be used such as +// IndexString, IndexBinary, etc. +// +// Deprecated: Use Transactions +func (db *DB) CreateIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + return tx.CreateIndex(name, pattern, less...) + }) +} + +// ReplaceIndex builds a new index and populates it with items. +// The items are ordered in an b-tree and can be retrieved using the +// Ascend* and Descend* methods. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceIndex(name, pattern string, + less ...func(a, b string) bool) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateIndex(name, pattern, less...) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateIndex(name, pattern, less...) + } + return err + } + return nil + }) +} + +// CreateSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// An error will occur if an index with the same name already exists. +// +// The rect function converts a string to a rectangle. The rectangle is +// represented by two arrays, min and max. Both arrays may have a length +// between 1 and 20, and both arrays must match in length. A length of 1 is a +// one dimensional rectangle, and a length of 4 is a four dimension rectangle. +// There is support for up to 20 dimensions. +// The values of min must be less than the values of max at the same dimension. +// Thus min[0] must be less-than-or-equal-to max[0]. +// The IndexRect is a default function that can be used for the rect +// parameter. 
+// +// Deprecated: Use Transactions +func (db *DB) CreateSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + return tx.CreateSpatialIndex(name, pattern, rect) + }) +} + +// ReplaceSpatialIndex builds a new index and populates it with items. +// The items are organized in an r-tree and can be retrieved using the +// Intersects method. +// If a previous index with the same name exists, that index will be deleted. +// +// Deprecated: Use Transactions +func (db *DB) ReplaceSpatialIndex(name, pattern string, + rect func(item string) (min, max []float64)) error { + return db.Update(func(tx *Tx) error { + err := tx.CreateSpatialIndex(name, pattern, rect) + if err != nil { + if err == ErrIndexExists { + err := tx.DropIndex(name) + if err != nil { + return err + } + return tx.CreateSpatialIndex(name, pattern, rect) + } + return err + } + return nil + }) +} + +// DropIndex removes an index. +// +// Deprecated: Use Transactions +func (db *DB) DropIndex(name string) error { + return db.Update(func(tx *Tx) error { + return tx.DropIndex(name) + }) +} + +// Indexes returns a list of index names. +// +// Deprecated: Use Transactions +func (db *DB) Indexes() ([]string, error) { + var names []string + var err = db.View(func(tx *Tx) error { + var err error + names, err = tx.Indexes() + return err + }) + return names, err +} + +// ReadConfig returns the database configuration. +func (db *DB) ReadConfig(config *Config) error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + *config = db.config + return nil +} + +// SetConfig updates the database configuration. +func (db *DB) SetConfig(config Config) error { + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + switch config.SyncPolicy { + default: + return ErrInvalidSyncPolicy + case Never, EverySecond, Always: + } + db.config = config + return nil +} + +// insertIntoDatabase performs inserts an item in to the database and updates +// all indexes. If a previous item with the same key already exists, that item +// will be replaced with the new one, and return the previous item. +func (db *DB) insertIntoDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.ReplaceOrInsert(item) + if prev != nil { + // A previous item was removed from the keys tree. Let's + // fully delete this item from all indexes. + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + if item.opts != nil && item.opts.ex { + // The new item has eviction options. Add it to the + // expires tree + db.exps.ReplaceOrInsert(item) + } + for _, idx := range db.idxs { + if !idx.match(item.key) { + continue + } + if idx.btr != nil { + // Add new item to btree index. + idx.btr.ReplaceOrInsert(item) + } + if idx.rtr != nil { + // Add new item to rtree index. + idx.rtr.Insert(item) + } + } + // we must return the previous item to the caller. + return pdbi +} + +// deleteFromDatabase removes and item from the database and indexes. The input +// item must only have the key field specified thus "&dbItem{key: key}" is all +// that is needed to fully remove the item with the matching key. 
If an item +// with the matching key was found in the database, it will be removed and +// returned to the caller. A nil return value means that the item was not +// found in the database +func (db *DB) deleteFromDatabase(item *dbItem) *dbItem { + var pdbi *dbItem + prev := db.keys.Delete(item) + if prev != nil { + pdbi = prev.(*dbItem) + if pdbi.opts != nil && pdbi.opts.ex { + // Remove it from the exipres tree. + db.exps.Delete(pdbi) + } + for _, idx := range db.idxs { + if idx.btr != nil { + // Remove it from the btree index. + idx.btr.Delete(pdbi) + } + if idx.rtr != nil { + // Remove it from the rtree index. + idx.rtr.Remove(pdbi) + } + } + } + return pdbi +} + +// backgroundManager runs continuously in the background and performs various +// operations such as removing expired items and syncing to disk. +func (db *DB) backgroundManager() { + flushes := 0 + t := time.NewTicker(time.Second) + defer t.Stop() + for range t.C { + var shrink bool + // Open a standard view. This will take a full lock of the + // database thus allowing for access to anything we need. + var onExpired func([]string) + var expired []*dbItem + var onExpiredSync func(key, value string, tx *Tx) error + err := db.Update(func(tx *Tx) error { + onExpired = db.config.OnExpired + if onExpired == nil { + onExpiredSync = db.config.OnExpiredSync + } + if db.persist && !db.config.AutoShrinkDisabled { + pos, err := db.file.Seek(0, 1) + if err != nil { + return err + } + aofsz := int(pos) + if aofsz > db.config.AutoShrinkMinSize { + prc := float64(db.config.AutoShrinkPercentage) / 100.0 + shrink = aofsz > db.lastaofsz+int(float64(db.lastaofsz)*prc) + } + } + // produce a list of expired items that need removing + db.exps.AscendLessThan(&dbItem{ + opts: &dbItemOpts{ex: true, exat: time.Now()}, + }, func(item btree.Item) bool { + expired = append(expired, item.(*dbItem)) + return true + }) + if onExpired == nil && onExpiredSync == nil { + for _, itm := range expired { + if _, err := tx.Delete(itm.key); err != nil { + // it's ok to get a "not found" because the + // 'Delete' method reports "not found" for + // expired items. + if err != ErrNotFound { + return err + } + } + } + } else if onExpiredSync != nil { + for _, itm := range expired { + if err := onExpiredSync(itm.key, itm.val, tx); err != nil { + return err + } + } + } + return nil + }) + if err == ErrDatabaseClosed { + break + } + + // send expired event, if needed + if onExpired != nil && len(expired) > 0 { + keys := make([]string, 0, 32) + for _, itm := range expired { + keys = append(keys, itm.key) + } + onExpired(keys) + } + + // execute a disk sync, if needed + func() { + db.mu.Lock() + defer db.mu.Unlock() + if db.persist && db.config.SyncPolicy == EverySecond && + flushes != db.flushes { + _ = db.file.Sync() + flushes = db.flushes + } + }() + if shrink { + if err = db.Shrink(); err != nil { + if err == ErrDatabaseClosed { + break + } + } + } + } +} + +// Shrink will make the database file smaller by removing redundant +// log entries. This operation does not block the database. +func (db *DB) Shrink() error { + db.mu.Lock() + if db.closed { + db.mu.Unlock() + return ErrDatabaseClosed + } + if !db.persist { + // The database was opened with ":memory:" as the path. + // There is no persistence, and no need to do anything here. + db.mu.Unlock() + return nil + } + if db.shrinking { + // The database is already in the process of shrinking. 
+ db.mu.Unlock() + return ErrShrinkInProcess + } + db.shrinking = true + defer func() { + db.mu.Lock() + db.shrinking = false + db.mu.Unlock() + }() + fname := db.file.Name() + tmpname := fname + ".tmp" + // the endpos is used to return to the end of the file when we are + // finished writing all of the current items. + endpos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.mu.Unlock() + time.Sleep(time.Second / 4) // wait just a bit before starting + f, err := os.Create(tmpname) + if err != nil { + return err + } + defer func() { + _ = f.Close() + _ = os.RemoveAll(tmpname) + }() + + // we are going to read items in as chunks as to not hold up the database + // for too long. + var buf []byte + pivot := "" + done := false + for !done { + err := func() error { + db.mu.RLock() + defer db.mu.RUnlock() + if db.closed { + return ErrDatabaseClosed + } + done = true + var n int + db.keys.AscendGreaterOrEqual(&dbItem{key: pivot}, + func(item btree.Item) bool { + dbi := item.(*dbItem) + // 1000 items or 64MB buffer + if n > 1000 || len(buf) > 64*1024*1024 { + pivot = dbi.key + done = false + return false + } + buf = dbi.writeSetTo(buf) + n++ + return true + }, + ) + if len(buf) > 0 { + if _, err := f.Write(buf); err != nil { + return err + } + buf = buf[:0] + } + return nil + }() + if err != nil { + return err + } + } + // We reached this far so all of the items have been written to a new tmp + // There's some more work to do by appending the new line from the aof + // to the tmp file and finally swap the files out. + return func() error { + // We're wrapping this in a function to get the benefit of a defered + // lock/unlock. + db.mu.Lock() + defer db.mu.Unlock() + if db.closed { + return ErrDatabaseClosed + } + // We are going to open a new version of the aof file so that we do + // not change the seek position of the previous. This may cause a + // problem in the future if we choose to use syscall file locking. + aof, err := os.Open(fname) + if err != nil { + return err + } + defer func() { _ = aof.Close() }() + if _, err := aof.Seek(endpos, 0); err != nil { + return err + } + // Just copy all of the new commands that have occurred since we + // started the shrink process. + if _, err := io.Copy(f, aof); err != nil { + return err + } + // Close all files + if err := aof.Close(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + if err := db.file.Close(); err != nil { + return err + } + // Any failures below here is really bad. So just panic. + if err := os.Rename(tmpname, fname); err != nil { + panic(err) + } + db.file, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + panic(err) + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil + }() +} + +var errValidEOF = errors.New("valid eof") + +// readLoad reads from the reader and loads commands into the database. +// modTime is the modified time of the reader, should be no greater than +// the current time.Now(). +func (db *DB) readLoad(rd io.Reader, modTime time.Time) error { + data := make([]byte, 4096) + parts := make([]string, 0, 8) + r := bufio.NewReader(rd) + for { + // read a single command. + // first we should read the number of parts that the of the command + line, err := r.ReadBytes('\n') + if err != nil { + if len(line) > 0 { + // got an eof but also data. this should be an unexpected eof. 
+				return io.ErrUnexpectedEOF
+			}
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+		if line[0] != '*' {
+			return ErrInvalid
+		}
+		// convert the string number to an int
+		var n int
+		if len(line) == 4 && line[len(line)-2] == '\r' {
+			if line[1] < '0' || line[1] > '9' {
+				return ErrInvalid
+			}
+			n = int(line[1] - '0')
+		} else {
+			if len(line) < 5 || line[len(line)-2] != '\r' {
+				return ErrInvalid
+			}
+			for i := 1; i < len(line)-2; i++ {
+				if line[i] < '0' || line[i] > '9' {
+					return ErrInvalid
+				}
+				n = n*10 + int(line[i]-'0')
+			}
+		}
+		// read each part of the command.
+		parts = parts[:0]
+		for i := 0; i < n; i++ {
+			// read the number of bytes of the part.
+			line, err := r.ReadBytes('\n')
+			if err != nil {
+				return err
+			}
+			if line[0] != '$' {
+				return ErrInvalid
+			}
+			// convert the string number to an int
+			var n int
+			if len(line) == 4 && line[len(line)-2] == '\r' {
+				if line[1] < '0' || line[1] > '9' {
+					return ErrInvalid
+				}
+				n = int(line[1] - '0')
+			} else {
+				if len(line) < 5 || line[len(line)-2] != '\r' {
+					return ErrInvalid
+				}
+				for i := 1; i < len(line)-2; i++ {
+					if line[i] < '0' || line[i] > '9' {
+						return ErrInvalid
+					}
+					n = n*10 + int(line[i]-'0')
+				}
+			}
+			// resize the read buffer
+			if len(data) < n+2 {
+				dataln := len(data)
+				for dataln < n+2 {
+					dataln *= 2
+				}
+				data = make([]byte, dataln)
+			}
+			if _, err = io.ReadFull(r, data[:n+2]); err != nil {
+				return err
+			}
+			if data[n] != '\r' || data[n+1] != '\n' {
+				return ErrInvalid
+			}
+			// copy string
+			parts = append(parts, string(data[:n]))
+		}
+		// finished reading the command
+
+		if len(parts) == 0 {
+			continue
+		}
+		if (parts[0][0] == 's' || parts[0][0] == 'S') &&
+			(parts[0][1] == 'e' || parts[0][1] == 'E') &&
+			(parts[0][2] == 't' || parts[0][2] == 'T') {
+			// SET
+			if len(parts) < 3 || len(parts) == 4 || len(parts) > 5 {
+				return ErrInvalid
+			}
+			if len(parts) == 5 {
+				if strings.ToLower(parts[3]) != "ex" {
+					return ErrInvalid
+				}
+				ex, err := strconv.ParseInt(parts[4], 10, 64)
+				if err != nil {
+					return err
+				}
+				now := time.Now()
+				dur := (time.Duration(ex) * time.Second) - now.Sub(modTime)
+				if dur > 0 {
+					db.insertIntoDatabase(&dbItem{
+						key: parts[1],
+						val: parts[2],
+						opts: &dbItemOpts{
+							ex:   true,
+							exat: now.Add(dur),
+						},
+					})
+				}
+			} else {
+				db.insertIntoDatabase(&dbItem{key: parts[1], val: parts[2]})
+			}
+		} else if (parts[0][0] == 'd' || parts[0][0] == 'D') &&
+			(parts[0][1] == 'e' || parts[0][1] == 'E') &&
+			(parts[0][2] == 'l' || parts[0][2] == 'L') {
+			// DEL
+			if len(parts) != 2 {
+				return ErrInvalid
+			}
+			db.deleteFromDatabase(&dbItem{key: parts[1]})
+		} else if (parts[0][0] == 'f' || parts[0][0] == 'F') &&
+			strings.ToLower(parts[0]) == "flushdb" {
+			db.keys = btree.New(btreeDegrees, nil)
+			db.exps = btree.New(btreeDegrees, &exctx{db})
+			db.idxs = make(map[string]*index)
+		} else {
+			return ErrInvalid
+		}
+	}
+	return nil
+}
+
+// load reads entries from the append only database file and fills the database.
+// The file format uses the Redis append only file format, which is a series
+// of RESP commands. For more information on RESP please read
+// http://redis.io/topics/protocol. The only supported RESP commands are DEL and
+// SET.
+func (db *DB) load() error { + fi, err := db.file.Stat() + if err != nil { + return err + } + if err := db.readLoad(db.file, fi.ModTime()); err != nil { + return err + } + pos, err := db.file.Seek(0, 2) + if err != nil { + return err + } + db.lastaofsz = int(pos) + return nil +} + +// managed calls a block of code that is fully contained in a transaction. +// This method is intended to be wrapped by Update and View +func (db *DB) managed(writable bool, fn func(tx *Tx) error) (err error) { + var tx *Tx + tx, err = db.Begin(writable) + if err != nil { + return + } + defer func() { + if err != nil { + // The caller returned an error. We must rollback. + _ = tx.Rollback() + return + } + if writable { + // Everything went well. Lets Commit() + err = tx.Commit() + } else { + // read-only transaction can only roll back. + err = tx.Rollback() + } + }() + tx.funcd = true + defer func() { + tx.funcd = false + }() + err = fn(tx) + return +} + +// View executes a function within a managed read-only transaction. +// When a non-nil error is returned from the function that error will be return +// to the caller of View(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) View(fn func(tx *Tx) error) error { + return db.managed(false, fn) +} + +// Update executes a function within a managed read/write transaction. +// The transaction has been committed when no error is returned. +// In the event that an error is returned, the transaction will be rolled back. +// When a non-nil error is returned from the function, the transaction will be +// rolled back and the that error will be return to the caller of Update(). +// +// Executing a manual commit or rollback from inside the function will result +// in a panic. +func (db *DB) Update(fn func(tx *Tx) error) error { + return db.managed(true, fn) +} + +// get return an item or nil if not found. +func (db *DB) get(key string) *dbItem { + item := db.keys.Get(&dbItem{key: key}) + if item != nil { + return item.(*dbItem) + } + return nil +} + +// Tx represents a transaction on the database. This transaction can either be +// read-only or read/write. Read-only transactions can be used for retrieving +// values for keys and iterating through keys and values. Read/write +// transactions can set and delete keys. +// +// All transactions must be committed or rolled-back when done. +type Tx struct { + db *DB // the underlying database. + writable bool // when false mutable operations fail. + funcd bool // when true Commit and Rollback panic. + wc *txWriteContext // context for writable transactions. +} + +type txWriteContext struct { + // rollback when deleteAll is called + rbkeys *btree.BTree // a tree of all item ordered by key + rbexps *btree.BTree // a tree of items ordered by expiration + rbidxs map[string]*index // the index trees. + + rollbackItems map[string]*dbItem // details for rolling back tx. + commitItems map[string]*dbItem // details for committing tx. + itercount int // stack of iterators + rollbackIndexes map[string]*index // details for dropped indexes. +} + +// DeleteAll deletes all items from the database. +func (tx *Tx) DeleteAll() error { + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return ErrTxIterating + } + + // check to see if we've already deleted everything + if tx.wc.rbkeys == nil { + // we need to backup the live data in case of a rollback. 
+ tx.wc.rbkeys = tx.db.keys + tx.wc.rbexps = tx.db.exps + tx.wc.rbidxs = tx.db.idxs + } + + // now reset the live database trees + tx.db.keys = btree.New(btreeDegrees, nil) + tx.db.exps = btree.New(btreeDegrees, &exctx{tx.db}) + tx.db.idxs = make(map[string]*index) + + // finally re-create the indexes + for name, idx := range tx.wc.rbidxs { + tx.db.idxs[name] = idx.clearCopy() + } + + // always clear out the commits + tx.wc.commitItems = make(map[string]*dbItem) + + return nil +} + +// Begin opens a new transaction. +// Multiple read-only transactions can be opened at the same time but there can +// only be one read/write transaction at a time. Attempting to open a read/write +// transactions while another one is in progress will result in blocking until +// the current read/write transaction is completed. +// +// All transactions must be closed by calling Commit() or Rollback() when done. +func (db *DB) Begin(writable bool) (*Tx, error) { + tx := &Tx{ + db: db, + writable: writable, + } + tx.lock() + if db.closed { + tx.unlock() + return nil, ErrDatabaseClosed + } + if writable { + // writable transactions have a writeContext object that + // contains information about changes to the database. + tx.wc = &txWriteContext{} + tx.wc.rollbackItems = make(map[string]*dbItem) + tx.wc.rollbackIndexes = make(map[string]*index) + if db.persist { + tx.wc.commitItems = make(map[string]*dbItem) + } + } + return tx, nil +} + +// lock locks the database based on the transaction type. +func (tx *Tx) lock() { + if tx.writable { + tx.db.mu.Lock() + } else { + tx.db.mu.RLock() + } +} + +// unlock unlocks the database based on the transaction type. +func (tx *Tx) unlock() { + if tx.writable { + tx.db.mu.Unlock() + } else { + tx.db.mu.RUnlock() + } +} + +// rollbackInner handles the underlying rollback logic. +// Intended to be called from Commit() and Rollback(). +func (tx *Tx) rollbackInner() { + // rollback the deleteAll if needed + if tx.wc.rbkeys != nil { + tx.db.keys = tx.wc.rbkeys + tx.db.idxs = tx.wc.rbidxs + tx.db.exps = tx.wc.rbexps + } + for key, item := range tx.wc.rollbackItems { + tx.db.deleteFromDatabase(&dbItem{key: key}) + if item != nil { + // When an item is not nil, we will need to reinsert that item + // into the database overwriting the current one. + tx.db.insertIntoDatabase(item) + } + } + for name, idx := range tx.wc.rollbackIndexes { + delete(tx.db.idxs, name) + if idx != nil { + // When an index is not nil, we will need to rebuilt that index + // this could be an expensive process if the database has many + // items or the index is complex. + tx.db.idxs[name] = idx + idx.rebuild() + } + } +} + +// Commit writes all changes to disk. +// An error is returned when a write error occurs, or when a Commit() is called +// from a read-only transaction. +func (tx *Tx) Commit() error { + if tx.funcd { + panic("managed tx commit not allowed") + } + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + var err error + if tx.db.persist && (len(tx.wc.commitItems) > 0 || tx.wc.rbkeys != nil) { + tx.db.buf = tx.db.buf[:0] + // write a flushdb if a deleteAll was called. + if tx.wc.rbkeys != nil { + tx.db.buf = append(tx.db.buf, "*1\r\n$7\r\nflushdb\r\n"...) + } + // Each committed record is written to disk + for key, item := range tx.wc.commitItems { + if item == nil { + tx.db.buf = (&dbItem{key: key}).writeDeleteTo(tx.db.buf) + } else { + tx.db.buf = item.writeSetTo(tx.db.buf) + } + } + // Flushing the buffer only once per transaction. 
+ // If this operation fails then the write did failed and we must + // rollback. + if _, err = tx.db.file.Write(tx.db.buf); err != nil { + tx.rollbackInner() + } + if tx.db.config.SyncPolicy == Always { + _ = tx.db.file.Sync() + } + // Increment the number of flushes. The background syncing uses this. + tx.db.flushes++ + } + // Unlock the database and allow for another writable transaction. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return err +} + +// Rollback closes the transaction and reverts all mutable operations that +// were performed on the transaction such as Set() and Delete(). +// +// Read-only transactions can only be rolled back, not committed. +func (tx *Tx) Rollback() error { + if tx.funcd { + panic("managed tx rollback not allowed") + } + if tx.db == nil { + return ErrTxClosed + } + // The rollback func does the heavy lifting. + if tx.writable { + tx.rollbackInner() + } + // unlock the database for more transactions. + tx.unlock() + // Clear the db field to disable this transaction from future use. + tx.db = nil + return nil +} + +// dbItemOpts holds various meta information about an item. +type dbItemOpts struct { + ex bool // does this item expire? + exat time.Time // when does this item expire? +} +type dbItem struct { + key, val string // the binary key and value + opts *dbItemOpts // optional meta information + keyless bool // keyless item for scanning +} + +func appendArray(buf []byte, count int) []byte { + buf = append(buf, '*') + buf = append(buf, strconv.FormatInt(int64(count), 10)...) + buf = append(buf, '\r', '\n') + return buf +} + +func appendBulkString(buf []byte, s string) []byte { + buf = append(buf, '$') + buf = append(buf, strconv.FormatInt(int64(len(s)), 10)...) + buf = append(buf, '\r', '\n') + buf = append(buf, s...) + buf = append(buf, '\r', '\n') + return buf +} + +// writeSetTo writes an item as a single SET record to the a bufio Writer. +func (dbi *dbItem) writeSetTo(buf []byte) []byte { + if dbi.opts != nil && dbi.opts.ex { + ex := dbi.opts.exat.Sub(time.Now()) / time.Second + buf = appendArray(buf, 5) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + buf = appendBulkString(buf, "ex") + buf = appendBulkString(buf, strconv.FormatUint(uint64(ex), 10)) + } else { + buf = appendArray(buf, 3) + buf = appendBulkString(buf, "set") + buf = appendBulkString(buf, dbi.key) + buf = appendBulkString(buf, dbi.val) + } + return buf +} + +// writeSetTo writes an item as a single DEL record to the a bufio Writer. +func (dbi *dbItem) writeDeleteTo(buf []byte) []byte { + buf = appendArray(buf, 2) + buf = appendBulkString(buf, "del") + buf = appendBulkString(buf, dbi.key) + return buf +} + +// expired evaluates id the item has expired. This will always return false when +// the item does not have `opts.ex` set to true. +func (dbi *dbItem) expired() bool { + return dbi.opts != nil && dbi.opts.ex && time.Now().After(dbi.opts.exat) +} + +// MaxTime from http://stackoverflow.com/questions/25065055#32620397 +// This is a long time in the future. It's an imaginary number that is +// used for b-tree ordering. +var maxTime = time.Unix(1<<63-62135596801, 999999999) + +// expiresAt will return the time when the item will expire. When an item does +// not expire `maxTime` is used. 
+func (dbi *dbItem) expiresAt() time.Time { + if dbi.opts == nil || !dbi.opts.ex { + return maxTime + } + return dbi.opts.exat +} + +// Less determines if a b-tree item is less than another. This is required +// for ordering, inserting, and deleting items from a b-tree. It's important +// to note that the ctx parameter is used to help with determine which +// formula to use on an item. Each b-tree should use a different ctx when +// sharing the same item. +func (dbi *dbItem) Less(item btree.Item, ctx interface{}) bool { + dbi2 := item.(*dbItem) + switch ctx := ctx.(type) { + case *exctx: + // The expires b-tree formula + if dbi2.expiresAt().After(dbi.expiresAt()) { + return true + } + if dbi.expiresAt().After(dbi2.expiresAt()) { + return false + } + case *index: + if ctx.less != nil { + // Using an index + if ctx.less(dbi.val, dbi2.val) { + return true + } + if ctx.less(dbi2.val, dbi.val) { + return false + } + } + } + // Always fall back to the key comparison. This creates absolute uniqueness. + if dbi.keyless { + return false + } else if dbi2.keyless { + return true + } + return dbi.key < dbi2.key +} + +// Rect converts a string to a rectangle. +// An invalid rectangle will cause a panic. +func (dbi *dbItem) Rect(ctx interface{}) (min, max []float64) { + switch ctx := ctx.(type) { + case *index: + return ctx.rect(dbi.val) + } + return nil, nil +} + +// SetOptions represents options that may be included with the Set() command. +type SetOptions struct { + // Expires indicates that the Set() key-value will expire + Expires bool + // TTL is how much time the key-value will exist in the database + // before being evicted. The Expires field must also be set to true. + // TTL stands for Time-To-Live. + TTL time.Duration +} + +// GetLess returns the less function for an index. This is handy for +// doing ad-hoc compares inside a transaction. +// Returns ErrNotFound if the index is not found or there is no less +// function bound to the index +func (tx *Tx) GetLess(index string) (func(a, b string) bool, error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.less == nil { + return nil, ErrNotFound + } + return idx.less, nil +} + +// GetRect returns the rect function for an index. This is handy for +// doing ad-hoc searches inside a transaction. +// Returns ErrNotFound if the index is not found or there is no rect +// function bound to the index +func (tx *Tx) GetRect(index string) (func(s string) (min, max []float64), + error) { + if tx.db == nil { + return nil, ErrTxClosed + } + idx, ok := tx.db.idxs[index] + if !ok || idx.rect == nil { + return nil, ErrNotFound + } + return idx.rect, nil +} + +// Set inserts or replaces an item in the database based on the key. +// The opt params may be used for additional functionality such as forcing +// the item to be evicted at a specified time. When the return value +// for err is nil the operation succeeded. When the return value of +// replaced is true, then the operaton replaced an existing item whose +// value will be returned through the previousValue variable. +// The results of this operation will not be available to other +// transactions until the current transaction has successfully committed. +// +// Only a writable transaction can be used with this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. 
+func (tx *Tx) Set(key, value string, opts *SetOptions) (previousValue string, + replaced bool, err error) { + if tx.db == nil { + return "", false, ErrTxClosed + } else if !tx.writable { + return "", false, ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", false, ErrTxIterating + } + item := &dbItem{key: key, val: value} + if opts != nil { + if opts.Expires { + // The caller is requesting that this item expires. Convert the + // TTL to an absolute time and bind it to the item. + item.opts = &dbItemOpts{ex: true, exat: time.Now().Add(opts.TTL)} + } + } + // Insert the item into the keys tree. + prev := tx.db.insertIntoDatabase(item) + + // insert into the rollback map if there has not been a deleteAll. + if tx.wc.rbkeys == nil { + if prev == nil { + // An item with the same key did not previously exist. Let's + // create a rollback entry with a nil value. A nil value indicates + // that the entry should be deleted on rollback. When the value is + // *not* nil, that means the entry should be reverted. + tx.wc.rollbackItems[key] = nil + } else { + // A previous item already exists in the database. Let's create a + // rollback entry with the item as the value. We need to check the + // map to see if there isn't already an item that matches the + // same key. + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = prev + } + if !prev.expired() { + previousValue, replaced = prev.val, true + } + } + } + // For commits we simply assign the item to the map. We use this map to + // write the entry to disk. + if tx.db.persist { + tx.wc.commitItems[key] = item + } + return previousValue, replaced, nil +} + +// Get returns a value for a key. If the item does not exist or if the item +// has expired then ErrNotFound is returned. If ignoreExpired is true, then +// the found value will be returned even if it is expired. +func (tx *Tx) Get(key string, ignoreExpired ...bool) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } + var ignore bool + if len(ignoreExpired) != 0 { + ignore = ignoreExpired[0] + } + item := tx.db.get(key) + if item == nil || (item.expired() && !ignore) { + // The item does not exists or has expired. Let's assume that + // the caller is only interested in items that have not expired. + return "", ErrNotFound + } + return item.val, nil +} + +// Delete removes an item from the database based on the item's key. If the item +// does not exist or if the item has expired then ErrNotFound is returned. +// +// Only a writable transaction can be used for this operation. +// This operation is not allowed during iterations such as Ascend* & Descend*. +func (tx *Tx) Delete(key string) (val string, err error) { + if tx.db == nil { + return "", ErrTxClosed + } else if !tx.writable { + return "", ErrTxNotWritable + } else if tx.wc.itercount > 0 { + return "", ErrTxIterating + } + item := tx.db.deleteFromDatabase(&dbItem{key: key}) + if item == nil { + return "", ErrNotFound + } + // create a rollback entry if there has not been a deleteAll call. + if tx.wc.rbkeys == nil { + if _, ok := tx.wc.rollbackItems[key]; !ok { + tx.wc.rollbackItems[key] = item + } + } + if tx.db.persist { + tx.wc.commitItems[key] = nil + } + // Even though the item has been deleted, we still want to check + // if it has expired. An expired item should not be returned. + if item.expired() { + // The item exists in the tree, but has expired. Let's assume that + // the caller is only interested in items that have not expired. 
+ return "", ErrNotFound + } + return item.val, nil +} + +// TTL returns the remaining time-to-live for an item. +// A negative duration will be returned for items that do not have an +// expiration. +func (tx *Tx) TTL(key string) (time.Duration, error) { + if tx.db == nil { + return 0, ErrTxClosed + } + item := tx.db.get(key) + if item == nil { + return 0, ErrNotFound + } else if item.opts == nil || !item.opts.ex { + return -1, nil + } + dur := item.opts.exat.Sub(time.Now()) + if dur < 0 { + return 0, ErrNotFound + } + return dur, nil +} + +// scan iterates through a specified index and calls user-defined iterator +// function for each item encountered. +// The desc param indicates that the iterator should descend. +// The gt param indicates that there is a greaterThan limit. +// The lt param indicates that there is a lessThan limit. +// The index param tells the scanner to use the specified index tree. An +// empty string for the index means to scan the keys, not the values. +// The start and stop params are the greaterThan, lessThan limits. For +// descending order, these will be lessThan, greaterThan. +// An error will be returned if the tx is closed or the index is not found. +func (tx *Tx) scan(desc, gt, lt bool, index, start, stop string, + iterator func(key, value string) bool) error { + if tx.db == nil { + return ErrTxClosed + } + // wrap a btree specific iterator around the user-defined iterator. + iter := func(item btree.Item) bool { + dbi := item.(*dbItem) + return iterator(dbi.key, dbi.val) + } + var tr *btree.BTree + if index == "" { + // empty index means we will use the keys tree. + tr = tx.db.keys + } else { + idx := tx.db.idxs[index] + if idx == nil { + // index was not found. return error + return ErrNotFound + } + tr = idx.btr + if tr == nil { + return nil + } + } + // create some limit items + var itemA, itemB *dbItem + if gt || lt { + if index == "" { + itemA = &dbItem{key: start} + itemB = &dbItem{key: stop} + } else { + itemA = &dbItem{val: start} + itemB = &dbItem{val: stop} + if desc { + itemA.keyless = true + itemB.keyless = true + } + } + } + // execute the scan on the underlying tree. + if tx.wc != nil { + tx.wc.itercount++ + defer func() { + tx.wc.itercount-- + }() + } + if desc { + if gt { + if lt { + tr.DescendRange(itemA, itemB, iter) + } else { + tr.DescendGreaterThan(itemA, iter) + } + } else if lt { + tr.DescendLessOrEqual(itemA, iter) + } else { + tr.Descend(iter) + } + } else { + if gt { + if lt { + tr.AscendRange(itemA, itemB, iter) + } else { + tr.AscendGreaterOrEqual(itemA, iter) + } + } else if lt { + tr.AscendLessThan(itemA, iter) + } else { + tr.Ascend(iter) + } + } + return nil +} + +// Match returns true if the specified key matches the pattern. This is a very +// simple pattern matcher where '*' matches on any number characters and '?' +// matches on any one character. +func Match(key, pattern string) bool { + return match.Match(key, pattern) +} + +// AscendKeys allows for iterating through keys based on the specified pattern. 
+func (tx *Tx) AscendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Ascend("", iterator) + } + return tx.Ascend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.AscendGreaterOrEqual("", min, func(key, value string) bool { + if key > max { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// DescendKeys allows for iterating through keys based on the specified pattern. +func (tx *Tx) DescendKeys(pattern string, + iterator func(key, value string) bool) error { + if pattern == "" { + return nil + } + if pattern[0] == '*' { + if pattern == "*" { + return tx.Descend("", iterator) + } + return tx.Descend("", func(key, value string) bool { + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) + } + min, max := match.Allowable(pattern) + return tx.DescendLessOrEqual("", max, func(key, value string) bool { + if key < min { + return false + } + if match.Match(key, pattern) { + if !iterator(key, value) { + return false + } + } + return true + }) +} + +// Ascend calls the iterator for every item in the database within the range +// [first, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Ascend(index string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, false, index, "", "", iterator) +} + +// AscendGreaterOrEqual calls the iterator for every item in the database within +// the range [pivot, last], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendGreaterOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, true, false, index, pivot, "", iterator) +} + +// AscendLessThan calls the iterator for every item in the database within the +// range [first, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendLessThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(false, false, true, index, pivot, "", iterator) +} + +// AscendRange calls the iterator for every item in the database within +// the range [greaterOrEqual, lessThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. 
+func (tx *Tx) AscendRange(index, greaterOrEqual, lessThan string, + iterator func(key, value string) bool) error { + return tx.scan( + false, true, true, index, greaterOrEqual, lessThan, iterator, + ) +} + +// Descend calls the iterator for every item in the database within the range +// [last, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) Descend(index string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, false, index, "", "", iterator) +} + +// DescendGreaterThan calls the iterator for every item in the database within +// the range [last, pivot), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendGreaterThan(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, true, false, index, pivot, "", iterator) +} + +// DescendLessOrEqual calls the iterator for every item in the database within +// the range [pivot, first], until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendLessOrEqual(index, pivot string, + iterator func(key, value string) bool) error { + return tx.scan(true, false, true, index, pivot, "", iterator) +} + +// DescendRange calls the iterator for every item in the database within +// the range [lessOrEqual, greaterThan), until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) DescendRange(index, lessOrEqual, greaterThan string, + iterator func(key, value string) bool) error { + return tx.scan( + true, true, true, index, lessOrEqual, greaterThan, iterator, + ) +} + +// AscendEqual calls the iterator for every item in the database that equals +// pivot, until iterator returns false. +// When an index is provided, the results will be ordered by the item values +// as specified by the less() function of the defined index. +// When an index is not provided, the results will be ordered by the item key. +// An invalid index will return an error. +func (tx *Tx) AscendEqual(index, pivot string, + iterator func(key, value string) bool) error { + var err error + var less func(a, b string) bool + if index != "" { + less, err = tx.GetLess(index) + if err != nil { + return err + } + } + return tx.AscendGreaterOrEqual(index, pivot, func(key, value string) bool { + if less == nil { + if key != pivot { + return false + } + } else if less(pivot, value) { + return false + } + return iterator(key, value) + }) +} + +// DescendEqual calls the iterator for every item in the database that equals +// pivot, until iterator returns false. 
+// When an index is provided, the results will be ordered by the item values
+// as specified by the less() function of the defined index.
+// When an index is not provided, the results will be ordered by the item key.
+// An invalid index will return an error.
+func (tx *Tx) DescendEqual(index, pivot string,
+	iterator func(key, value string) bool) error {
+	var err error
+	var less func(a, b string) bool
+	if index != "" {
+		less, err = tx.GetLess(index)
+		if err != nil {
+			return err
+		}
+	}
+	return tx.DescendLessOrEqual(index, pivot, func(key, value string) bool {
+		if less == nil {
+			if key != pivot {
+				return false
+			}
+		} else if less(value, pivot) {
+			return false
+		}
+		return iterator(key, value)
+	})
+}
+
+// rect is used by Intersects and Nearby
+type rect struct {
+	min, max []float64
+}
+
+func (r *rect) Rect(ctx interface{}) (min, max []float64) {
+	return r.min, r.max
+}
+
+// Nearby searches for rectangle items that are near a target rect.
+// All items belonging to the specified index will be returned in order of
+// nearest to farthest.
+// The specified index must have been created by AddIndex() and the target
+// is represented by the rect string. This string will be processed by the
+// same bounds function that was passed to the CreateSpatialIndex() function.
+// An invalid index will return an error.
+// The dist param is the distance of the bounding boxes. In the case of
+// simple 2D points, it's the distance of the two 2D points squared.
+func (tx *Tx) Nearby(index, bounds string,
+	iterator func(key, value string, dist float64) bool) error {
+	if tx.db == nil {
+		return ErrTxClosed
+	}
+	if index == "" {
+		// cannot search on keys tree. just return nil.
+		return nil
+	}
+	// wrap a rtree specific iterator around the user-defined iterator.
+	iter := func(item rtree.Item, dist float64) bool {
+		dbi := item.(*dbItem)
+		return iterator(dbi.key, dbi.val, dist)
+	}
+	idx := tx.db.idxs[index]
+	if idx == nil {
+		// index was not found. return error
+		return ErrNotFound
+	}
+	if idx.rtr == nil {
+		// not an r-tree index. just return nil
+		return nil
+	}
+	// execute the nearby search
+	var min, max []float64
+	if idx.rect != nil {
+		min, max = idx.rect(bounds)
+	}
+	// set the center param to false, which uses the box dist calc.
+	idx.rtr.KNN(&rect{min, max}, false, iter)
+	return nil
+}
+
+// Intersects searches for rectangle items that intersect a target rect.
+// The specified index must have been created by AddIndex() and the target
+// is represented by the rect string. This string will be processed by the
+// same bounds function that was passed to the CreateSpatialIndex() function.
+// An invalid index will return an error.
+func (tx *Tx) Intersects(index, bounds string,
+	iterator func(key, value string) bool) error {
+	if tx.db == nil {
+		return ErrTxClosed
+	}
+	if index == "" {
+		// cannot search on keys tree. just return nil.
+		return nil
+	}
+	// wrap a rtree specific iterator around the user-defined iterator.
+	iter := func(item rtree.Item) bool {
+		dbi := item.(*dbItem)
+		return iterator(dbi.key, dbi.val)
+	}
+	idx := tx.db.idxs[index]
+	if idx == nil {
+		// index was not found. return error
+		return ErrNotFound
+	}
+	if idx.rtr == nil {
+		// not an r-tree index. just return nil
+		return nil
+	}
+	// execute the search
+	var min, max []float64
+	if idx.rect != nil {
+		min, max = idx.rect(bounds)
+	}
+	idx.rtr.Search(&rect{min, max}, iter)
+	return nil
+}
+
+// Len returns the number of items in the database
+func (tx *Tx) Len() (int, error) {
+	if tx.db == nil {
+		return 0, ErrTxClosed
+	}
+	return tx.db.keys.Len(), nil
+}
+
+// IndexOptions provides an index with additional features or
+// alternate functionality.
+type IndexOptions struct {
+	// CaseInsensitiveKeyMatching allows for case-insensitive
+	// matching on keys when setting key/values.
+	CaseInsensitiveKeyMatching bool
+}
+
+// CreateIndex builds a new index and populates it with items.
+// The items are ordered in a b-tree and can be retrieved using the
+// Ascend* and Descend* methods.
+// An error will occur if an index with the same name already exists.
+//
+// When a pattern is provided, the index will be populated with
+// keys that match the specified pattern. This is a very simple pattern
+// match where '*' matches on any number of characters and '?' matches on
+// any one character.
+// The less function compares if string 'a' is less than string 'b'.
+// It allows for indexes to create custom ordering. It's possible
+// that the strings may be textual or binary. It's up to the provided
+// less function to handle the content format and comparison.
+// There are some default less functions that can be used such as
+// IndexString, IndexBinary, etc.
+func (tx *Tx) CreateIndex(name, pattern string,
+	less ...func(a, b string) bool) error {
+	return tx.createIndex(name, pattern, less, nil, nil)
+}
+
+// CreateIndexOptions is the same as CreateIndex except that it allows
+// for additional options.
+func (tx *Tx) CreateIndexOptions(name, pattern string,
+	opts *IndexOptions,
+	less ...func(a, b string) bool) error {
+	return tx.createIndex(name, pattern, less, nil, opts)
+}
+
+// CreateSpatialIndex builds a new index and populates it with items.
+// The items are organized in an r-tree and can be retrieved using the
+// Intersects method.
+// An error will occur if an index with the same name already exists.
+//
+// The rect function converts a string to a rectangle. The rectangle is
+// represented by two arrays, min and max. Both arrays may have a length
+// between 1 and 20, and both arrays must match in length. A length of 1 is a
+// one-dimensional rectangle, and a length of 4 is a four-dimensional
+// rectangle. There is support for up to 20 dimensions.
+// The values of min must be less than or equal to the values of max at the
+// same dimension. Thus min[0] must be less-than-or-equal-to max[0].
+// The IndexRect is a default function that can be used for the rect
+// parameter.
+func (tx *Tx) CreateSpatialIndex(name, pattern string,
+	rect func(item string) (min, max []float64)) error {
+	return tx.createIndex(name, pattern, nil, rect, nil)
+}
+
+// CreateSpatialIndexOptions is the same as CreateSpatialIndex except that
+// it allows for additional options.
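+//
+// For example (a sketch; the index name and key pattern are hypothetical):
+//
+//	db.Update(func(tx *buntdb.Tx) error {
+//		return tx.CreateSpatialIndexOptions("fleet", "fleet:*:pos",
+//			&buntdb.IndexOptions{CaseInsensitiveKeyMatching: true},
+//			buntdb.IndexRect)
+//	})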
+func (tx *Tx) CreateSpatialIndexOptions(name, pattern string,
+	opts *IndexOptions,
+	rect func(item string) (min, max []float64)) error {
+	return tx.createIndex(name, pattern, nil, rect, opts)
+}
+
+// createIndex is called by CreateIndex() and CreateSpatialIndex()
+func (tx *Tx) createIndex(name string, pattern string,
+	lessers []func(a, b string) bool,
+	rect func(item string) (min, max []float64),
+	opts *IndexOptions,
+) error {
+	if tx.db == nil {
+		return ErrTxClosed
+	} else if !tx.writable {
+		return ErrTxNotWritable
+	} else if tx.wc.itercount > 0 {
+		return ErrTxIterating
+	}
+	if name == "" {
+		// cannot create an index without a name.
+		// an empty name index is designated for the main "keys" tree.
+		return ErrIndexExists
+	}
+	// check if an index with that name already exists.
+	if _, ok := tx.db.idxs[name]; ok {
+		// index with name already exists. error.
+		return ErrIndexExists
+	}
+	// generate a less function
+	var less func(a, b string) bool
+	switch len(lessers) {
+	default:
+		// multiple less functions specified.
+		// create a compound less function.
+		less = func(a, b string) bool {
+			for i := 0; i < len(lessers)-1; i++ {
+				if lessers[i](a, b) {
+					return true
+				}
+				if lessers[i](b, a) {
+					return false
+				}
+			}
+			return lessers[len(lessers)-1](a, b)
+		}
+	case 0:
+		// no less function
+	case 1:
+		less = lessers[0]
+	}
+	var sopts IndexOptions
+	if opts != nil {
+		sopts = *opts
+	}
+	if sopts.CaseInsensitiveKeyMatching {
+		pattern = strings.ToLower(pattern)
+	}
+	// initialize new index
+	idx := &index{
+		name:    name,
+		pattern: pattern,
+		less:    less,
+		rect:    rect,
+		db:      tx.db,
+		opts:    sopts,
+	}
+	idx.rebuild()
+	// save the index
+	tx.db.idxs[name] = idx
+	if tx.wc.rbkeys == nil {
+		// store the index in the rollback map.
+		if _, ok := tx.wc.rollbackIndexes[name]; !ok {
+			// we use nil to indicate that the index should be removed upon rollback.
+			tx.wc.rollbackIndexes[name] = nil
+		}
+	}
+	return nil
+}
+
+// DropIndex removes an index.
+func (tx *Tx) DropIndex(name string) error {
+	if tx.db == nil {
+		return ErrTxClosed
+	} else if !tx.writable {
+		return ErrTxNotWritable
+	} else if tx.wc.itercount > 0 {
+		return ErrTxIterating
+	}
+	if name == "" {
+		// cannot drop the default "keys" index
+		return ErrInvalidOperation
+	}
+	idx, ok := tx.db.idxs[name]
+	if !ok {
+		return ErrNotFound
+	}
+	// delete from the map.
+	// this is all that is needed to delete an index.
+	delete(tx.db.idxs, name)
+	if tx.wc.rbkeys == nil {
+		// store the index in the rollback map.
+		if _, ok := tx.wc.rollbackIndexes[name]; !ok {
+			// we use a non-nil copy of the index without the data to indicate that the
+			// index should be rebuilt upon rollback.
+			tx.wc.rollbackIndexes[name] = idx.clearCopy()
+		}
+	}
+	return nil
+}
+
+// Indexes returns a list of index names.
+func (tx *Tx) Indexes() ([]string, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	}
+	names := make([]string, 0, len(tx.db.idxs))
+	for name := range tx.db.idxs {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// Rect is a helper function that returns a string representation
+// of a rect. IndexRect() is the reverse function and can be used
+// to generate a rect from a string.
+func Rect(min, max []float64) string {
+	r := grect.Rect{Min: min, Max: max}
+	return r.String()
+}
+
+// Point is a helper function that converts a series of float64s
+// to a rectangle for a spatial index.
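+//
+// For example, Point(33, -115) returns the same string as
+// Rect([]float64{33, -115}, []float64{33, -115}), since a point is a rect
+// whose min and max are equal; the exact text comes from grect's formatting.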
+func Point(coords ...float64) string {
+	return Rect(coords, coords)
+}
+
+// IndexRect is a helper function that converts a string to a rect.
+// Rect() is the reverse function and can be used to generate a string
+// from a rect.
+func IndexRect(a string) (min, max []float64) {
+	r := grect.Get(a)
+	return r.Min, r.Max
+}
+
+// IndexString is a helper function that returns true if 'a' is less than 'b'.
+// This is a case-insensitive comparison. Use IndexBinary() for comparing
+// case-sensitive strings.
+func IndexString(a, b string) bool {
+	for i := 0; i < len(a) && i < len(b); i++ {
+		if a[i] >= 'A' && a[i] <= 'Z' {
+			if b[i] >= 'A' && b[i] <= 'Z' {
+				// both are uppercase, compare directly
+				if a[i] < b[i] {
+					return true
+				} else if a[i] > b[i] {
+					return false
+				}
+			} else {
+				// a is uppercase, convert a to lowercase
+				if a[i]+32 < b[i] {
+					return true
+				} else if a[i]+32 > b[i] {
+					return false
+				}
+			}
+		} else if b[i] >= 'A' && b[i] <= 'Z' {
+			// b is uppercase, convert b to lowercase
+			if a[i] < b[i]+32 {
+				return true
+			} else if a[i] > b[i]+32 {
+				return false
+			}
+		} else {
+			// neither are uppercase
+			if a[i] < b[i] {
+				return true
+			} else if a[i] > b[i] {
+				return false
+			}
+		}
+	}
+	return len(a) < len(b)
+}
+
+// IndexBinary is a helper function that returns true if 'a' is less than 'b'.
+// This compares the raw binary of the string.
+func IndexBinary(a, b string) bool {
+	return a < b
+}
+
+// IndexInt is a helper function that returns true if 'a' is less than 'b'.
+func IndexInt(a, b string) bool {
+	ia, _ := strconv.ParseInt(a, 10, 64)
+	ib, _ := strconv.ParseInt(b, 10, 64)
+	return ia < ib
+}
+
+// IndexUint is a helper function that returns true if 'a' is less than 'b'.
+// This compares uint64s that are added to the database using the
+// Uint() conversion function.
+func IndexUint(a, b string) bool {
+	ia, _ := strconv.ParseUint(a, 10, 64)
+	ib, _ := strconv.ParseUint(b, 10, 64)
+	return ia < ib
+}
+
+// IndexFloat is a helper function that returns true if 'a' is less than 'b'.
+// This compares float64s that are added to the database using the
+// Float() conversion function.
+func IndexFloat(a, b string) bool {
+	ia, _ := strconv.ParseFloat(a, 64)
+	ib, _ := strconv.ParseFloat(b, 64)
+	return ia < ib
+}
+
+// IndexJSON provides for the ability to create an index on any JSON field.
+// When the field is a string, the comparison will be case-insensitive.
+// It returns a helper function used by CreateIndex.
+func IndexJSON(path string) func(a, b string) bool {
+	return func(a, b string) bool {
+		return gjson.Get(a, path).Less(gjson.Get(b, path), false)
+	}
+}
+
+// IndexJSONCaseSensitive provides for the ability to create an index on
+// any JSON field.
+// When the field is a string, the comparison will be case-sensitive.
+// It returns a helper function used by CreateIndex.
+func IndexJSONCaseSensitive(path string) func(a, b string) bool {
+	return func(a, b string) bool {
+		return gjson.Get(a, path).Less(gjson.Get(b, path), true)
+	}
+}
+
+// Desc is a helper function that changes the order of an index.
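+//
+// For example, a sketch that orders an index from highest to lowest
+// (the index name and pattern are hypothetical):
+//
+//	tx.CreateIndex("ages", "user:*:age", buntdb.Desc(buntdb.IndexInt))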
+func Desc(less func(a, b string) bool) func(a, b string) bool { + return func(a, b string) bool { return less(b, a) } +} diff --git a/vendor/github.com/tidwall/buntdb/logo.png b/vendor/github.com/tidwall/buntdb/logo.png new file mode 100644 index 0000000..01c6d75 Binary files /dev/null and b/vendor/github.com/tidwall/buntdb/logo.png differ diff --git a/vendor/github.com/tidwall/gjson/.travis.yml b/vendor/github.com/tidwall/gjson/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/tidwall/gjson/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md new file mode 100644 index 0000000..cab0f9f --- /dev/null +++ b/vendor/github.com/tidwall/gjson/README.md @@ -0,0 +1,491 @@ +

+GJSON
+
+get json values quickly

+ +GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document. +It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines). + +Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool. + +Getting Started +=============== + +## Installing + +To start using GJSON, install Go and run `go get`: + +```sh +$ go get -u github.com/tidwall/gjson +``` + +This will retrieve the library. + +## Get a value +Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately. + +```go +package main + +import "github.com/tidwall/gjson" + +const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}` + +func main() { + value := gjson.Get(json, "name.last") + println(value.String()) +} +``` + +This will print: + +``` +Prichard +``` +*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.* + +## Path Syntax + +Below is a quick overview of the path syntax, for more complete information please +check out [GJSON Syntax](SYNTAX.md). + +A path is a series of keys separated by a dot. +A key may contain special wildcard characters '\*' and '?'. +To access an array value use the index as the key. +To get the number of elements in an array or to access a child path, use the '#' character. +The dot and wildcard characters can be escaped with '\\'. + +```json +{ + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]}, + {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]}, + {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]} + ] +} +``` +``` +"name.last" >> "Anderson" +"age" >> 37 +"children" >> ["Sara","Alex","Jack"] +"children.#" >> 3 +"children.1" >> "Alex" +"child*.2" >> "Jack" +"c?ildren.0" >> "Sara" +"fav\.movie" >> "Deer Hunter" +"friends.#.first" >> ["Dale","Roger","Jane"] +"friends.1.last" >> "Craig" +``` + +You can also query an array for the first match by using `#(...)`, or find all +matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` +comparison operators and the simple pattern matching `%` (like) and `!%` +(not like) operators. + +``` +friends.#(last=="Murphy").first >> "Dale" +friends.#(last=="Murphy")#.first >> ["Dale","Jane"] +friends.#(age>45)#.last >> ["Craig","Murphy"] +friends.#(first%"D*").last >> "Murphy" +friends.#(first!%"D*").last >> "Craig" +friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"] +``` + +*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was +changed in v1.3.0 as to avoid confusion with the new +[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility, +`#[...]` will continue to work until the next major release.* + +## Result Type + +GJSON supports the json types `string`, `number`, `bool`, and `null`. +Arrays and Objects are returned as their raw json types. 
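+
+For example, a quick sketch using the document from the [Get a value](#get-a-value) section above, where an object comes back as a raw json type:
+
+```go
+value := gjson.Get(json, "name") // a JSON-typed result
+println(value.Raw)               // {"first":"Janet","last":"Prichard"}
+```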
+
+The `Result` type holds one of these:
+
+```
+bool, for JSON booleans
+float64, for JSON numbers
+string, for JSON string literals
+nil, for JSON null
+```
+
+To directly access the value:
+
+```go
+result.Type    // can be String, Number, True, False, Null, or JSON
+result.Str     // holds the string
+result.Num     // holds the float64 number
+result.Raw     // holds the raw json
+result.Index   // index of raw value in original json, zero means index unknown
+```
+
+There are a variety of handy functions that work on a result:
+
+```go
+result.Exists() bool
+result.Value() interface{}
+result.Int() int64
+result.Uint() uint64
+result.Float() float64
+result.String() string
+result.Bool() bool
+result.Time() time.Time
+result.Array() []gjson.Result
+result.Map() map[string]gjson.Result
+result.Get(path string) Result
+result.ForEach(iterator func(key, value Result) bool)
+result.Less(token Result, caseSensitive bool) bool
+```
+
+The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types:
+
+```go
+boolean >> bool
+number >> float64
+string >> string
+null >> nil
+array >> []interface{}
+object >> map[string]interface{}
+```
+
+The `result.Array()` function returns back an array of values.
+If the result represents a non-existent value, then an empty array will be returned.
+If the result is not a JSON array, the return value will be an array containing one result.
+
+### 64-bit integers
+
+The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers.
+
+```go
+result.Int() int64    // -9223372036854775808 to 9223372036854775807
+result.Uint() uint64  // 0 to 18446744073709551615
+```
+
+## Modifiers and path chaining
+
+New in version 1.2 is support for modifier functions and path chaining.
+
+A modifier is a path component that performs custom processing on the
+json.
+
+Multiple paths can be "chained" together using the pipe character.
+This is useful for getting results from a modified query.
+
+For example, using the built-in `@reverse` modifier on the above json document,
+we'll get the `children` array and reverse the order:
+
+```
+"children|@reverse"   >> ["Jack","Alex","Sara"]
+"children|@reverse|0" >> "Jack"
+```
+
+There are currently three built-in modifiers:
+
+- `@reverse`: Reverse an array or the members of an object.
+- `@ugly`: Remove all whitespace from a json document.
+- `@pretty`: Make the json document more human readable.
+
+### Modifier arguments
+
+A modifier may accept an optional argument. The argument can be a valid JSON
+document or just characters.
+
+For example, the `@pretty` modifier takes a json object as its argument.
+
+```
+@pretty:{"sortKeys":true}
+```
+
+Which makes the json pretty and orders all of its keys.
+
+```json
+{
+  "age":37,
+  "children": ["Sara","Alex","Jack"],
+  "fav.movie": "Deer Hunter",
+  "friends": [
+    {"age": 44, "first": "Dale", "last": "Murphy"},
+    {"age": 68, "first": "Roger", "last": "Craig"},
+    {"age": 47, "first": "Jane", "last": "Murphy"}
+  ],
+  "name": {"first": "Tom", "last": "Anderson"}
+}
+```
+
+*The full list of `@pretty` options is `sortKeys`, `indent`, `prefix`, and `width`.
+Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.*
+
+### Custom modifiers
+
+You can also add custom modifiers.
+
+For example, here we create a modifier that makes the entire json document upper
+or lower case.
+
+```go
+gjson.AddModifier("case", func(json, arg string) string {
+	if arg == "upper" {
+		return strings.ToUpper(json)
+	}
+	if arg == "lower" {
+		return strings.ToLower(json)
+	}
+	return json
+})
+```
+
+```
+"children|@case:upper"          >> ["SARA","ALEX","JACK"]
+"children|@case:lower|@reverse" >> ["jack","alex","sara"]
+```
+
+## JSON Lines
+
+There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multiline document as an array.
+
+For example:
+
+```
+{"name": "Gilbert", "age": 61}
+{"name": "Alexa", "age": 34}
+{"name": "May", "age": 57}
+{"name": "Deloise", "age": 44}
+```
+
+```
+..#                 >> 4
+..1                 >> {"name": "Alexa", "age": 34}
+..3                 >> {"name": "Deloise", "age": 44}
+..#.name            >> ["Gilbert","Alexa","May","Deloise"]
+..#(name="May").age >> 57
+```
+
+The `ForEachLine` function will iterate through JSON lines.
+
+```go
+gjson.ForEachLine(json, func(line gjson.Result) bool {
+	println(line.String())
+	return true
+})
+```
+
+## Get nested array values
+
+Suppose you want all the last names from the following json:
+
+```json
+{
+  "programmers": [
+    {
+      "firstName": "Janet",
+      "lastName": "McLaughlin"
+    }, {
+      "firstName": "Elliotte",
+      "lastName": "Hunter"
+    }, {
+      "firstName": "Jason",
+      "lastName": "Harold"
+    }
+  ]
+}
+```
+
+You would use the path "programmers.#.lastName" like so:
+
+```go
+result := gjson.Get(json, "programmers.#.lastName")
+for _, name := range result.Array() {
+	println(name.String())
+}
+```
+
+You can also query an object inside an array:
+
+```go
+name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`)
+println(name.String())  // prints "Elliotte"
+```
+
+## Iterate through an object or array
+
+The `ForEach` function allows for quickly iterating through an object or array.
+The key and value are passed to the iterator function for objects.
+Only the value is passed for arrays.
+Returning `false` from an iterator will stop iteration.
+
+```go
+result := gjson.Get(json, "programmers")
+result.ForEach(func(key, value gjson.Result) bool {
+	println(value.String())
+	return true // keep iterating
+})
+```
+
+## Simple Parse and Get
+
+There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result.
+
+For example, all of these will return the same result:
+
+```go
+gjson.Parse(json).Get("name").Get("last")
+gjson.Get(json, "name").Get("last")
+gjson.Get(json, "name.last")
+```
+
+## Check for the existence of a value
+
+Sometimes you just want to know if a value exists.
+
+```go
+value := gjson.Get(json, "name.last")
+if !value.Exists() {
+	println("no last name")
+} else {
+	println(value.String())
+}
+
+// Or as one step
+if gjson.Get(json, "name.last").Exists() {
+	println("has a last name")
+}
+```
+
+## Validate JSON
+
+The `Get*` and `Parse*` functions expect that the json is well-formed. Bad json will not panic, but it may return unexpected results.
+
+If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON.
+
+```go
+if !gjson.Valid(json) {
+	return errors.New("invalid json")
+}
+value := gjson.Get(json, "name.last")
+```
+
+## Unmarshal to a map
+
+To unmarshal to a `map[string]interface{}`:
+
+```go
+m, ok := gjson.Parse(json).Value().(map[string]interface{})
+if !ok {
+	// not a map
+}
+```
+
+## Working with Bytes
+
+If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`.
+
+```go
+var json []byte = ...
+result := gjson.GetBytes(json, path)
+```
+
+If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern:
+
+```go
+var json []byte = ...
+result := gjson.GetBytes(json, path)
+var raw []byte
+if result.Index > 0 {
+	raw = json[result.Index:result.Index+len(result.Raw)]
+} else {
+	raw = []byte(result.Raw)
+}
+```
+
+This is a best-effort, no-allocation sub-slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`.
+
+## Get multiple values at once
+
+The `GetMany` function can be used to get multiple values at the same time.
+
+```go
+results := gjson.GetMany(json, "name.first", "name.last", "age")
+```
+
+The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths.
+
+## Performance
+
+Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/),
+[ffjson](https://github.com/pquerna/ffjson),
+[EasyJSON](https://github.com/mailru/easyjson),
+[jsonparser](https://github.com/buger/jsonparser),
+and [json-iterator](https://github.com/json-iterator/go):
+
+```
+BenchmarkGJSONGet-8             3000000    372 ns/op      0 B/op    0 allocs/op
+BenchmarkGJSONUnmarshalMap-8     900000   4154 ns/op   1920 B/op   26 allocs/op
+BenchmarkJSONUnmarshalMap-8      600000   9019 ns/op   3048 B/op   69 allocs/op
+BenchmarkJSONDecoder-8           300000  14120 ns/op   4224 B/op  184 allocs/op
+BenchmarkFFJSONLexer-8          1500000   3111 ns/op    896 B/op    8 allocs/op
+BenchmarkEasyJSONLexer-8        3000000    887 ns/op    613 B/op    6 allocs/op
+BenchmarkJSONParserGet-8        3000000    499 ns/op     21 B/op    0 allocs/op
+BenchmarkJSONIterator-8         3000000    812 ns/op    544 B/op    9 allocs/op
+```
+
+JSON document used:
+
+```json
+{
+  "widget": {
+    "debug": "on",
+    "window": {
+      "title": "Sample Konfabulator Widget",
+      "name": "main_window",
+      "width": 500,
+      "height": 500
+    },
+    "image": {
+      "src": "Images/Sun.png",
+      "hOffset": 250,
+      "vOffset": 250,
+      "alignment": "center"
+    },
+    "text": {
+      "data": "Click Here",
+      "size": 36,
+      "style": "bold",
+      "vOffset": 100,
+      "alignment": "center",
+      "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
+    }
+  }
+}
+```
+
+Each operation was rotated through one of the following search paths:
+
+```
+widget.window.name
+widget.image.hOffset
+widget.text.onMouseUp
+```
+
+*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.8 and can be found [here](https://github.com/tidwall/gjson-benchmarks).*
+
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+GJSON source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md new file mode 100644 index 0000000..dda33af --- /dev/null +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -0,0 +1,264 @@
+# GJSON Path Syntax
+
+A GJSON Path is a text string syntax that describes a search pattern for quickly retrieving values from a JSON payload.
+
+This document is designed to explain the structure of a GJSON Path through examples.
+
+- [Path structure](#path-structure)
+- [Basic](#basic)
+- [Wildcards](#wildcards)
+- [Escape Character](#escape-character)
+- [Arrays](#arrays)
+- [Queries](#queries)
+- [Dot vs Pipe](#dot-vs-pipe)
+- [Modifiers](#modifiers)
+
+The definitive implementation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson).
+Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online.
+
+
+## Path structure
+
+A GJSON Path is intended to be easily expressed as a series of components separated by a `.` character.
+
+Along with the `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, and `?`.
+
+## Example
+
+Given this JSON
+
+```json
+{
+  "name": {"first": "Tom", "last": "Anderson"},
+  "age":37,
+  "children": ["Sara","Alex","Jack"],
+  "fav.movie": "Deer Hunter",
+  "friends": [
+    {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]},
+    {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]},
+    {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]}
+  ]
+}
+```
+
+The following GJSON Paths evaluate to the accompanying values.
+
+### Basic
+
+In many cases you'll just want to retrieve values by object name or array index.
+
+```go
+name.last        "Anderson"
+name.first       "Tom"
+age              37
+children         ["Sara","Alex","Jack"]
+children.0       "Sara"
+children.1       "Alex"
+friends.1        {"first": "Roger", "last": "Craig", "age": 68}
+friends.1.first  "Roger"
+```
+
+### Wildcards
+
+A key may contain the special wildcard characters `*` and `?`.
+The `*` will match on any zero+ characters, and `?` matches on any one character.
+
+```go
+child*.2         "Jack"
+c?ildren.0       "Sara"
+```
+
+### Escape character
+
+Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`.
+
+```go
+fav\.movie       "Deer Hunter"
+```
+
+### Arrays
+
+The `#` character allows for digging into JSON Arrays.
+
+To get the length of an array you'll just use the `#` all by itself.
+
+```go
+friends.#        3
+friends.#.age    [44,68,47]
+```
+
+### Queries
+
+You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`.
+Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators,
+and the simple pattern matching `%` (like) and `!%` (not like) operators.
+
+```go
+friends.#(last=="Murphy").first    "Dale"
+friends.#(last=="Murphy")#.first   ["Dale","Jane"]
+friends.#(age>45)#.last            ["Craig","Murphy"]
+friends.#(first%"D*").last         "Murphy"
+friends.#(first!%"D*").last        "Craig"
+```
+
+To query for a non-object value in an array, you can forgo the string to the right of the operator.
+
+```go
+children.#(!%"*a*")    "Alex"
+children.#(%"*a*")#    ["Sara","Jack"]
+```
+
+Nested queries are allowed.
+
+```go
+friends.#(nets.#(=="fb"))#.first   ["Dale","Roger"]
+```
+
+*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was
+changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths)
+syntax. For backwards compatibility, `#[...]` will continue to work until the
+next major release.*
+
+### Dot vs Pipe
+
+The `.` is the standard separator, but it's also possible to use a `|`.
+In most cases they both end up returning the same results.
+The cases where `|` differs from `.` are when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries).
+ +Here are some examples + +```go +friends.0.first "Dale" +friends|0.first "Dale" +friends.0|first "Dale" +friends|0|first "Dale" +friends|# 3 +friends.# 3 +friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +friends.#(last="Murphy")#.first ["Dale","Jane"] +friends.#(last="Murphy")#|first +friends.#(last="Murphy")#.0 [] +friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44} +friends.#(last="Murphy")#.# [] +friends.#(last="Murphy")#|# 2 +``` + +Let's break down a few of these. + +The path `friends.#(last="Murphy")#` all by itself results in + +```json +[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}] +``` + +The `.first` suffix will process the `first` path on each array element *before* returning the results. Which becomes + +```json +["Dale","Jane"] +``` + +But the `|first` suffix actually processes the `first` path *after* the previous result. +Since the previous result is an array, not an object, it's not possible to process +because `first` does not exist. + +Yet, `|0` suffix returns + +```json +{"first": "Dale", "last": "Murphy", "age": 44} +``` + +Because `0` is the first index of the previous result. + +### Modifiers + +A modifier is a path component that performs custom processing on the JSON. + +For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array: + +```go +children.@reverse ["Jack","Alex","Sara"] +children.@reverse.0 "Jack" +``` + +There are currently three built-in modifiers: + +- `@reverse`: Reverse an array or the members of an object. +- `@ugly`: Remove all whitespace from JSON. +- `@pretty`: Make the JSON more human readable. + +#### Modifier arguments + +A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters. + +For example, the `@pretty` modifier takes a json object as its argument. + +``` +@pretty:{"sortKeys":true} +``` + +Which makes the json pretty and orders all of its keys. + +```json +{ + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"age": 44, "first": "Dale", "last": "Murphy"}, + {"age": 68, "first": "Roger", "last": "Craig"}, + {"age": 47, "first": "Jane", "last": "Murphy"} + ], + "name": {"first": "Tom", "last": "Anderson"} +} +``` + +*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`. +Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.* + +#### Custom modifiers + +You can also add custom modifiers. + +For example, here we create a modifier which makes the entire JSON payload upper or lower case. + +```go +gjson.AddModifier("case", func(json, arg string) string { + if arg == "upper" { + return strings.ToUpper(json) + } + if arg == "lower" { + return strings.ToLower(json) + } + return json +}) +"children.@case:upper" ["SARA","ALEX","JACK"] +"children.@case:lower.@reverse" ["jack","alex","sara"] +``` + +#### Multipaths + +Starting with v1.3.0, GJSON added the ability to join multiple paths together +to form new documents. Wrapping comma-separated paths between `{...}` or +`[...]` will result in a new array or object, respectively. + +For example, using the given multipath + +``` +{name.first,age,"the_murphys":friends.#(last="Murphy")#.first} +``` + +Here we selected the first name, age, and the first name for friends with the +last name "Murphy". 
+ +You'll notice that an optional key can be provided, in this case +"the_murphys", to force assign a key to a value. Otherwise, the name of the +actual field will be used, in this case "first". If a name cannot be +determined, then "_" is used. + +This results in + +``` +{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]} +``` + + diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go new file mode 100644 index 0000000..9fe40b5 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -0,0 +1,2799 @@ +// Package gjson provides searching for json strings. +package gjson + +import ( + "encoding/base64" + "encoding/json" + "errors" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf16" + "unicode/utf8" + + "github.com/tidwall/match" + "github.com/tidwall/pretty" +) + +// Type is Result type +type Type int + +const ( + // Null is a null json value + Null Type = iota + // False is a json false boolean + False + // Number is json number + Number + // String is a json string + String + // True is a json true boolean + True + // JSON is a raw block of JSON + JSON +) + +// String returns a string representation of the type. +func (t Type) String() string { + switch t { + default: + return "" + case Null: + return "Null" + case False: + return "False" + case Number: + return "Number" + case String: + return "String" + case True: + return "True" + case JSON: + return "JSON" + } +} + +// Result represents a json value that is returned from Get(). +type Result struct { + // Type is the json type + Type Type + // Raw is the raw json + Raw string + // Str is the json string + Str string + // Num is the json number + Num float64 + // Index of raw value in original json, zero means index unknown + Index int +} + +// String returns a string representation of the value. +func (t Result) String() string { + switch t.Type { + default: + return "" + case False: + return "false" + case Number: + if len(t.Raw) == 0 { + // calculated result + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + var i int + if t.Raw[0] == '-' { + i++ + } + for ; i < len(t.Raw); i++ { + if t.Raw[i] < '0' || t.Raw[i] > '9' { + return strconv.FormatFloat(t.Num, 'f', -1, 64) + } + } + return t.Raw + case String: + return t.Str + case JSON: + return t.Raw + case True: + return "true" + } +} + +// Bool returns an boolean representation. +func (t Result) Bool() bool { + switch t.Type { + default: + return false + case True: + return true + case String: + return t.Str != "" && t.Str != "0" && t.Str != "false" + case Number: + return t.Num != 0 + } +} + +// Int returns an integer representation. +func (t Result) Int() int64 { + switch t.Type { + default: + return 0 + case True: + return 1 + case String: + n, _ := parseInt(t.Str) + return n + case Number: + // try to directly convert the float64 to int64 + n, ok := floatToInt(t.Num) + if !ok { + // now try to parse the raw string + n, ok = parseInt(t.Raw) + if !ok { + // fallback to a standard conversion + return int64(t.Num) + } + } + return n + } +} + +// Uint returns an unsigned integer representation. 
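+// For example (a sketch), a very large integer survives the float64 path via
+// the raw-string fallback:
+//
+//	gjson.Get(`{"id":18446744073709551615}`, "id").Uint() // 18446744073709551615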
+func (t Result) Uint() uint64 {
+	switch t.Type {
+	default:
+		return 0
+	case True:
+		return 1
+	case String:
+		n, _ := parseUint(t.Str)
+		return n
+	case Number:
+		// try to directly convert the float64 to uint64
+		n, ok := floatToUint(t.Num)
+		if !ok {
+			// now try to parse the raw string
+			n, ok = parseUint(t.Raw)
+			if !ok {
+				// fallback to a standard conversion
+				return uint64(t.Num)
+			}
+		}
+		return n
+	}
+}
+
+// Float returns a float64 representation.
+func (t Result) Float() float64 {
+	switch t.Type {
+	default:
+		return 0
+	case True:
+		return 1
+	case String:
+		n, _ := strconv.ParseFloat(t.Str, 64)
+		return n
+	case Number:
+		return t.Num
+	}
+}
+
+// Time returns a time.Time representation.
+func (t Result) Time() time.Time {
+	res, _ := time.Parse(time.RFC3339, t.String())
+	return res
+}
+
+// Array returns back an array of values.
+// If the result represents a non-existent value, then an empty array will be
+// returned. If the result is not a JSON array, the return value will be an
+// array containing one result.
+func (t Result) Array() []Result {
+	if t.Type == Null {
+		return []Result{}
+	}
+	if t.Type != JSON {
+		return []Result{t}
+	}
+	r := t.arrayOrMap('[', false)
+	return r.a
+}
+
+// IsObject returns true if the result value is a JSON object.
+func (t Result) IsObject() bool {
+	return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{'
+}
+
+// IsArray returns true if the result value is a JSON array.
+func (t Result) IsArray() bool {
+	return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '['
+}
+
+// ForEach iterates through values.
+// If the result represents a non-existent value, then no values will be
+// iterated. If the result is an Object, the iterator will pass the key and
+// value of each item. If the result is an Array, the iterator will only pass
+// the value of each item. If the result is not a JSON array or object, the
+// iterator will pass back one value equal to the result.
+func (t Result) ForEach(iterator func(key, value Result) bool) {
+	if !t.Exists() {
+		return
+	}
+	if t.Type != JSON {
+		iterator(Result{}, t)
+		return
+	}
+	json := t.Raw
+	var keys bool
+	var i int
+	var key, value Result
+	for ; i < len(json); i++ {
+		if json[i] == '{' {
+			i++
+			key.Type = String
+			keys = true
+			break
+		} else if json[i] == '[' {
+			i++
+			break
+		}
+		if json[i] > ' ' {
+			return
+		}
+	}
+	var str string
+	var vesc bool
+	var ok bool
+	for ; i < len(json); i++ {
+		if keys {
+			if json[i] != '"' {
+				continue
+			}
+			s := i
+			i, str, vesc, ok = parseString(json, i+1)
+			if !ok {
+				return
+			}
+			if vesc {
+				key.Str = unescape(str[1 : len(str)-1])
+			} else {
+				key.Str = str[1 : len(str)-1]
+			}
+			key.Raw = str
+			key.Index = s
+		}
+		for ; i < len(json); i++ {
+			if json[i] <= ' ' || json[i] == ',' || json[i] == ':' {
+				continue
+			}
+			break
+		}
+		s := i
+		i, value, ok = parseAny(json, i, true)
+		if !ok {
+			return
+		}
+		value.Index = s
+		if !iterator(key, value) {
+			return
+		}
+	}
+}
+
+// Map returns back a map of values. The result should be a JSON object.
+func (t Result) Map() map[string]Result {
+	if t.Type != JSON {
+		return map[string]Result{}
+	}
+	r := t.arrayOrMap('{', false)
+	return r.o
+}
+
+// Get searches result for the specified path.
+// The result should be a JSON array or object.
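+//
+// For example, these are equivalent:
+//
+//	gjson.Get(json, "name").Get("last")
+//	gjson.Get(json, "name.last")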
+func (t Result) Get(path string) Result { + return Get(t.Raw, path) +} + +type arrayOrMapResult struct { + a []Result + ai []interface{} + o map[string]Result + oi map[string]interface{} + vc byte +} + +func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) { + var json = t.Raw + var i int + var value Result + var count int + var key Result + if vc == 0 { + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + r.vc = json[i] + i++ + break + } + if json[i] > ' ' { + goto end + } + } + } else { + for ; i < len(json); i++ { + if json[i] == vc { + i++ + break + } + if json[i] > ' ' { + goto end + } + } + r.vc = vc + } + if r.vc == '{' { + if valueize { + r.oi = make(map[string]interface{}) + } else { + r.o = make(map[string]Result) + } + } else { + if valueize { + r.ai = make([]interface{}, 0) + } else { + r.a = make([]Result, 0) + } + } + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + // get next value + if json[i] == ']' || json[i] == '}' { + break + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + value.Str = "" + } else { + continue + } + case '{', '[': + value.Type = JSON + value.Raw = squash(json[i:]) + value.Str, value.Num = "", 0 + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + value.Str, value.Num = "", 0 + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + value.Num = 0 + } + i += len(value.Raw) - 1 + + if r.vc == '{' { + if count%2 == 0 { + key = value + } else { + if valueize { + if _, ok := r.oi[key.Str]; !ok { + r.oi[key.Str] = value.Value() + } + } else { + if _, ok := r.o[key.Str]; !ok { + r.o[key.Str] = value + } + } + } + count++ + } else { + if valueize { + r.ai = append(r.ai, value.Value()) + } else { + r.a = append(r.a, value) + } + } + } +end: + return +} + +// Parse parses the json and returns a result. +// +// This function expects that the json is well-formed, and does not validate. +// Invalid json will not panic, but it may return back unexpected results. +// If you are consuming JSON from an unpredictable source then you may want to +// use the Valid function first. +func Parse(json string) Result { + var value Result + for i := 0; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + value.Type = JSON + value.Raw = json[i:] // just take the entire raw + break + } + if json[i] <= ' ' { + continue + } + switch json[i] { + default: + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + value.Type = Number + value.Raw, value.Num = tonum(json[i:]) + } else { + return Result{} + } + case 'n': + value.Type = Null + value.Raw = tolit(json[i:]) + case 't': + value.Type = True + value.Raw = tolit(json[i:]) + case 'f': + value.Type = False + value.Raw = tolit(json[i:]) + case '"': + value.Type = String + value.Raw, value.Str = tostr(json[i:]) + } + break + } + return value +} + +// ParseBytes parses the json and returns a result. +// If working with bytes, this method preferred over Parse(string(data)) +func ParseBytes(json []byte) Result { + return Parse(string(json)) +} + +func squash(json string) string { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' has already been read + depth := 1 + for i := 1; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + return json[:i+1] + } + } + } + } + return json +} + +func tonum(json string) (raw string, num float64) { + for i := 1; i < len(json); i++ { + // less than dash might have valid characters + if json[i] <= '-' { + if json[i] <= ' ' || json[i] == ',' { + // break on whitespace and comma + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + // could be a '+' or '-'. let's assume so. + continue + } + if json[i] < ']' { + // probably a valid number + continue + } + if json[i] == 'e' || json[i] == 'E' { + // allow for exponential numbers + continue + } + // likely a ']' or '}' + raw = json[:i] + num, _ = strconv.ParseFloat(raw, 64) + return + } + raw = json + num, _ = strconv.ParseFloat(raw, 64) + return +} + +func tolit(json string) (raw string) { + for i := 1; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return json[:i] + } + } + return json +} + +func tostr(json string) (raw string, str string) { + // expects that the lead character is a '"' + for i := 1; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return json[:i+1], json[1:i] + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + var ret string + if i+1 < len(json) { + ret = json[:i+1] + } else { + ret = json[:i] + } + return ret, unescape(json[1:i]) + } + } + return json, json[1:] +} + +// Exists returns true if value exists. 
+// +// if gjson.Get(json, "name.last").Exists(){ +// println("value exists") +// } +func (t Result) Exists() bool { + return t.Type != Null || len(t.Raw) != 0 +} + +// Value returns one of these types: +// +// bool, for JSON booleans +// float64, for JSON numbers +// Number, for JSON numbers +// string, for JSON string literals +// nil, for JSON null +// map[string]interface{}, for JSON objects +// []interface{}, for JSON arrays +// +func (t Result) Value() interface{} { + if t.Type == String { + return t.Str + } + switch t.Type { + default: + return nil + case False: + return false + case Number: + return t.Num + case JSON: + r := t.arrayOrMap(0, true) + if r.vc == '{' { + return r.oi + } else if r.vc == '[' { + return r.ai + } + return nil + case True: + return true + } +} + +func parseString(json string, i int) (int, string, bool, bool) { + var s = i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + return i + 1, json[s-1 : i+1], false, true + } + if json[i] == '\\' { + i++ + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + return i + 1, json[s-1 : i+1], true, true + } + } + break + } + } + return i, json[s-1:], false, false +} + +func parseNumber(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ']' || + json[i] == '}' { + return i, json[s:i] + } + } + return i, json[s:] +} + +func parseLiteral(json string, i int) (int, string) { + var s = i + i++ + for ; i < len(json); i++ { + if json[i] < 'a' || json[i] > 'z' { + return i, json[s:i] + } + } + return i, json[s:] +} + +type arrayPathResult struct { + part string + path string + pipe string + piped bool + more bool + alogok bool + arrch bool + alogkey string + query struct { + on bool + path string + op string + value string + all bool + } +} + +func parseArrayPath(path string) (r arrayPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + r.part = path[:i] + r.path = path[i+1:] + r.more = true + return + } + if path[i] == '#' { + r.arrch = true + if i == 0 && len(path) > 1 { + if path[1] == '.' { + r.alogok = true + r.alogkey = path[2:] + r.path = path[:1] + } else if path[1] == '[' || path[1] == '(' { + // query + r.query.on = true + if true { + qpath, op, value, _, fi, ok := parseQuery(path[i:]) + if !ok { + // bad query, end now + break + } + r.query.path = qpath + r.query.op = op + r.query.value = value + i = fi - 1 + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + } else { + var end byte + if path[1] == '[' { + end = ']' + } else { + end = ')' + } + i += 2 + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s := i + for ; i < len(path); i++ { + if path[i] <= ' ' || + path[i] == '!' || + path[i] == '=' || + path[i] == '<' || + path[i] == '>' || + path[i] == '%' || + path[i] == end { + break + } + } + r.query.path = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + if i < len(path) { + s = i + if path[i] == '!' 
{ + if i < len(path)-1 && (path[i+1] == '=' || + path[i+1] == '%') { + i++ + } + } else if path[i] == '<' || path[i] == '>' { + if i < len(path)-1 && path[i+1] == '=' { + i++ + } + } else if path[i] == '=' { + if i < len(path)-1 && path[i+1] == '=' { + s++ + i++ + } + } + i++ + r.query.op = path[s:i] + // whitespace + for ; i < len(path); i++ { + if path[i] > ' ' { + break + } + } + s = i + for ; i < len(path); i++ { + if path[i] == '"' { + i++ + s2 := i + for ; i < len(path); i++ { + if path[i] > '\\' { + continue + } + if path[i] == '"' { + // look for an escaped slash + if path[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if path[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + } else if path[i] == end { + if i+1 < len(path) && path[i+1] == '#' { + r.query.all = true + } + break + } + } + if i > len(path) { + i = len(path) + } + v := path[s:i] + for len(v) > 0 && v[len(v)-1] <= ' ' { + v = v[:len(v)-1] + } + r.query.value = v + } + } + } + } + continue + } + } + r.part = path + r.path = "" + return +} + +// splitQuery takes a query and splits it into three parts: +// path, op, middle, and right. +// So for this query: +// #(first_name=="Murphy").last +// Becomes +// first_name # path +// =="Murphy" # middle +// .last # right +// Or, +// #(service_roles.#(=="one")).cap +// Becomes +// service_roles.#(=="one") # path +// # middle +// .cap # right +func parseQuery(query string) ( + path, op, value, remain string, i int, ok bool, +) { + if len(query) < 2 || query[0] != '#' || + (query[1] != '(' && query[1] != '[') { + return "", "", "", "", i, false + } + i = 2 + j := 0 // start of value part + depth := 1 + for ; i < len(query); i++ { + if depth == 1 && j == 0 { + switch query[i] { + case '!', '=', '<', '>', '%': + // start of the value part + j = i + continue + } + } + if query[i] == '\\' { + i++ + } else if query[i] == '[' || query[i] == '(' { + depth++ + } else if query[i] == ']' || query[i] == ')' { + depth-- + if depth == 0 { + break + } + } else if query[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(query); i++ { + if query[i] == '\\' { + i++ + } else if query[i] == '"' { + break + } + } + } + } + if depth > 0 { + return "", "", "", "", i, false + } + if j > 0 { + path = trim(query[2:j]) + value = trim(query[j:i]) + remain = query[i+1:] + // parse the compare op from the value + var opsz int + switch { + case len(value) == 1: + opsz = 1 + case value[0] == '!' && value[1] == '=': + opsz = 2 + case value[0] == '!' 
&& value[1] == '%': + opsz = 2 + case value[0] == '<' && value[1] == '=': + opsz = 2 + case value[0] == '>' && value[1] == '=': + opsz = 2 + case value[0] == '=' && value[1] == '=': + value = value[1:] + opsz = 1 + case value[0] == '<': + opsz = 1 + case value[0] == '>': + opsz = 1 + case value[0] == '=': + opsz = 1 + case value[0] == '%': + opsz = 1 + } + op = value[:opsz] + value = trim(value[opsz:]) + } else { + path = trim(query[2:i]) + remain = query[i+1:] + } + return path, op, value, remain, i + 1, true +} + +func trim(s string) string { +left: + if len(s) > 0 && s[0] <= ' ' { + s = s[1:] + goto left + } +right: + if len(s) > 0 && s[len(s)-1] <= ' ' { + s = s[:len(s)-1] + goto right + } + return s +} + +type objectPathResult struct { + part string + path string + pipe string + piped bool + wild bool + more bool +} + +func parseObjectPath(path string) (r objectPathResult) { + for i := 0; i < len(path); i++ { + if path[i] == '|' { + r.part = path[:i] + r.pipe = path[i+1:] + r.piped = true + return + } + if path[i] == '.' { + // peek at the next byte and see if it's a '@', '[', or '{'. + r.part = path[:i] + if !DisableModifiers && + i < len(path)-1 && + (path[i+1] == '@' || + path[i+1] == '[' || path[i+1] == '{') { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + return + } + if path[i] == '*' || path[i] == '?' { + r.wild = true + continue + } + if path[i] == '\\' { + // go into escape mode. this is a slower path that + // strips off the escape character from the part. + epart := []byte(path[:i]) + i++ + if i < len(path) { + epart = append(epart, path[i]) + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + if i < len(path) { + epart = append(epart, path[i]) + } + continue + } else if path[i] == '.' { + r.part = string(epart) + // peek at the next byte and see if it's a '@' modifier + if !DisableModifiers && + i < len(path)-1 && path[i+1] == '@' { + r.pipe = path[i+1:] + r.piped = true + } else { + r.path = path[i+1:] + r.more = true + } + r.more = true + return + } else if path[i] == '|' { + r.part = string(epart) + r.pipe = path[i+1:] + r.piped = true + return + } else if path[i] == '*' || path[i] == '?' { + r.wild = true + } + epart = append(epart, path[i]) + } + } + // append the last part + r.part = string(epart) + return + } + } + r.part = path + return +} + +func parseSquash(json string, i int) (int, string) { + // expects that the lead character is a '[' or '{' + // squash the value, ignoring all nested arrays and objects. 
+ // the first '[' or '{' has already been read + s := i + i++ + depth := 1 + for ; i < len(json); i++ { + if json[i] >= '"' && json[i] <= '}' { + switch json[i] { + case '"': + i++ + s2 := i + for ; i < len(json); i++ { + if json[i] > '\\' { + continue + } + if json[i] == '"' { + // look for an escaped slash + if json[i-1] == '\\' { + n := 0 + for j := i - 2; j > s2-1; j-- { + if json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + break + } + } + case '{', '[': + depth++ + case '}', ']': + depth-- + if depth == 0 { + i++ + return i, json[s:i] + } + } + } + } + return i, json[s:] +} + +func parseObject(c *parseContext, i int, path string) (int, bool) { + var pmatch, kesc, vesc, ok, hit bool + var key, val string + rp := parseObjectPath(path) + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + for i < len(c.json) { + for ; i < len(c.json); i++ { + if c.json[i] == '"' { + // parse_key_string + // this is slightly different from getting s string value + // because we don't need the outer quotes. + i++ + var s = i + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + i, key, kesc, ok = i+1, c.json[s:i], false, true + goto parse_key_string_done + } + if c.json[i] == '\\' { + i++ + for ; i < len(c.json); i++ { + if c.json[i] > '\\' { + continue + } + if c.json[i] == '"' { + // look for an escaped slash + if c.json[i-1] == '\\' { + n := 0 + for j := i - 2; j > 0; j-- { + if c.json[j] != '\\' { + break + } + n++ + } + if n%2 == 0 { + continue + } + } + i, key, kesc, ok = i+1, c.json[s:i], true, true + goto parse_key_string_done + } + } + break + } + } + key, kesc, ok = c.json[s:], false, false + parse_key_string_done: + break + } + if c.json[i] == '}' { + return i + 1, false + } + } + if !ok { + return i, false + } + if rp.wild { + if kesc { + pmatch = match.Match(unescape(key), rp.part) + } else { + pmatch = match.Match(key, rp.part) + } + } else { + if kesc { + pmatch = rp.part == unescape(key) + } else { + pmatch = rp.part == key + } + } + hit = pmatch && !rp.more + for ; i < len(c.json); i++ { + switch c.json[i] { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if hit { + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if hit { + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if hit { + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + } + break + } + } + return i, false +} +func queryMatches(rp *arrayPathResult, value Result) bool { + rpv := rp.query.value + if len(rpv) > 2 && rpv[0] == '"' && rpv[len(rpv)-1] == '"' { + rpv = rpv[1 : len(rpv)-1] + } + if 
!value.Exists() { + return false + } + if rp.query.op == "" { + // the query is only looking for existence, such as: + // friends.#(name) + // which makes sure that the array "friends" has an element of + // "name" that exists + return true + } + switch value.Type { + case String: + switch rp.query.op { + case "=": + return value.Str == rpv + case "!=": + return value.Str != rpv + case "<": + return value.Str < rpv + case "<=": + return value.Str <= rpv + case ">": + return value.Str > rpv + case ">=": + return value.Str >= rpv + case "%": + return match.Match(value.Str, rpv) + case "!%": + return !match.Match(value.Str, rpv) + } + case Number: + rpvn, _ := strconv.ParseFloat(rpv, 64) + switch rp.query.op { + case "=": + return value.Num == rpvn + case "!=": + return value.Num != rpvn + case "<": + return value.Num < rpvn + case "<=": + return value.Num <= rpvn + case ">": + return value.Num > rpvn + case ">=": + return value.Num >= rpvn + } + case True: + switch rp.query.op { + case "=": + return rpv == "true" + case "!=": + return rpv != "true" + case ">": + return rpv == "false" + case ">=": + return true + } + case False: + switch rp.query.op { + case "=": + return rpv == "false" + case "!=": + return rpv != "false" + case "<": + return rpv == "true" + case "<=": + return true + } + } + return false +} +func parseArray(c *parseContext, i int, path string) (int, bool) { + var pmatch, vesc, ok, hit bool + var val string + var h int + var alog []int + var partidx int + var multires []byte + rp := parseArrayPath(path) + if !rp.arrch { + n, ok := parseUint(rp.part) + if !ok { + partidx = -1 + } else { + partidx = int(n) + } + } + if !rp.more && rp.piped { + c.pipe = rp.pipe + c.piped = true + } + + procQuery := func(qval Result) bool { + if rp.query.all { + if len(multires) == 0 { + multires = append(multires, '[') + } + } + var res Result + if qval.Type == JSON { + res = qval.Get(rp.query.path) + } else { + if rp.query.path != "" { + return false + } + res = qval + } + if queryMatches(&rp, res) { + if rp.more { + left, right, ok := splitPossiblePipe(rp.path) + if ok { + rp.path = left + c.pipe = right + c.piped = true + } + res = qval.Get(rp.path) + } else { + res = qval + } + if rp.query.all { + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + if raw != "" { + if len(multires) > 1 { + multires = append(multires, ',') + } + multires = append(multires, raw...) 
+ } + } else { + c.value = res + return true + } + } + return false + } + + for i < len(c.json)+1 { + if !rp.arrch { + pmatch = partidx == h + hit = pmatch && !rp.more + } + h++ + if rp.alogok { + alog = append(alog, i) + } + for ; ; i++ { + var ch byte + if i > len(c.json) { + break + } else if i == len(c.json) { + ch = ']' + } else { + ch = c.json[i] + } + switch ch { + default: + continue + case '"': + i++ + i, val, vesc, ok = parseString(c.json, i) + if !ok { + return i, false + } + if rp.query.on { + var qval Result + if vesc { + qval.Str = unescape(val[1 : len(val)-1]) + } else { + qval.Str = val[1 : len(val)-1] + } + qval.Raw = val + qval.Type = String + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + if vesc { + c.value.Str = unescape(val[1 : len(val)-1]) + } else { + c.value.Str = val[1 : len(val)-1] + } + c.value.Raw = val + c.value.Type = String + return i, true + } + case '{': + if pmatch && !hit { + i, hit = parseObject(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '[': + if pmatch && !hit { + i, hit = parseArray(c, i+1, rp.path) + if hit { + if rp.alogok { + break + } + return i, true + } + } else { + i, val = parseSquash(c.json, i) + if rp.query.on { + if procQuery(Result{Raw: val, Type: JSON}) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = JSON + return i, true + } + } + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + qval.Type = Number + qval.Num, _ = strconv.ParseFloat(val, 64) + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + c.value.Type = Number + c.value.Num, _ = strconv.ParseFloat(val, 64) + return i, true + } + case 't', 'f', 'n': + vc := c.json[i] + i, val = parseLiteral(c.json, i) + if rp.query.on { + var qval Result + qval.Raw = val + switch vc { + case 't': + qval.Type = True + case 'f': + qval.Type = False + } + if procQuery(qval) { + return i, true + } + } else if hit { + if rp.alogok { + break + } + c.value.Raw = val + switch vc { + case 't': + c.value.Type = True + case 'f': + c.value.Type = False + } + return i, true + } + case ']': + if rp.arrch && rp.part == "#" { + if rp.alogok { + left, right, ok := splitPossiblePipe(rp.alogkey) + if ok { + rp.alogkey = left + c.pipe = right + c.piped = true + } + var jsons = make([]byte, 0, 64) + jsons = append(jsons, '[') + for j, k := 0, 0; j < len(alog); j++ { + _, res, ok := parseAny(c.json, alog[j], true) + if ok { + res := res.Get(rp.alogkey) + if res.Exists() { + if k > 0 { + jsons = append(jsons, ',') + } + raw := res.Raw + if len(raw) == 0 { + raw = res.String() + } + jsons = append(jsons, []byte(raw)...) 
+ k++ + } + } + } + jsons = append(jsons, ']') + c.value.Type = JSON + c.value.Raw = string(jsons) + return i + 1, true + } + if rp.alogok { + break + } + + c.value.Type = Number + c.value.Num = float64(h - 1) + c.value.Raw = strconv.Itoa(h - 1) + c.calcd = true + return i + 1, true + } + if len(multires) > 0 && !c.value.Exists() { + c.value = Result{ + Raw: string(append(multires, ']')), + Type: JSON, + } + } + return i + 1, false + } + break + } + } + return i, false +} + +func splitPossiblePipe(path string) (left, right string, ok bool) { + // take a quick peek for the pipe character. If found we'll split the piped + // part of the path into the c.pipe field and shorten the rp. + var possible bool + for i := 0; i < len(path); i++ { + if path[i] == '|' { + possible = true + break + } + } + if !possible { + return + } + + // split the left and right side of the path with the pipe character as + // the delimiter. This is a little tricky because we'll need to basically + // parse the entire path. + + for i := 0; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '.' { + if i == len(path)-1 { + return + } + if path[i+1] == '#' { + i += 2 + if i == len(path) { + return + } + if path[i] == '[' || path[i] == '(' { + var start, end byte + if path[i] == '[' { + start, end = '[', ']' + } else { + start, end = '(', ')' + } + // inside selector, balance brackets + i++ + depth := 1 + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == start { + depth++ + } else if path[i] == end { + depth-- + if depth == 0 { + break + } + } else if path[i] == '"' { + // inside selector string, balance quotes + i++ + for ; i < len(path); i++ { + if path[i] == '\\' { + i++ + } else if path[i] == '"' { + break + } + } + } + } + } + } + } else if path[i] == '|' { + return path[:i], path[i+1:], true + } + } + return +} + +// ForEachLine iterates through lines of JSON as specified by the JSON Lines +// format (http://jsonlines.org/). +// Each line is returned as a GJSON Result. +func ForEachLine(json string, iterator func(line Result) bool) { + var res Result + var i int + for { + i, res, _ = parseAny(json, i, true) + if !res.Exists() { + break + } + if !iterator(res) { + return + } + } +} + +type subSelector struct { + name string + path string +} + +// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or +// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the +// first character in path is either '[' or '{', and has already been checked +// prior to calling this function. 
+func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) {
+	depth := 1
+	colon := 0
+	start := 1
+	i := 1
+	pushSel := func() {
+		var sel subSelector
+		if colon == 0 {
+			sel.path = path[start:i]
+		} else {
+			sel.name = path[start:colon]
+			sel.path = path[colon+1 : i]
+		}
+		sels = append(sels, sel)
+		colon = 0
+		start = i + 1
+	}
+	for ; i < len(path); i++ {
+		switch path[i] {
+		case '\\':
+			i++
+		case ':':
+			if depth == 1 {
+				colon = i
+			}
+		case ',':
+			if depth == 1 {
+				pushSel()
+			}
+		case '"':
+			i++
+		loop:
+			for ; i < len(path); i++ {
+				switch path[i] {
+				case '\\':
+					i++
+				case '"':
+					break loop
+				}
+			}
+		case '[', '(', '{':
+			depth++
+		case ']', ')', '}':
+			depth--
+			if depth == 0 {
+				pushSel()
+				path = path[i+1:]
+				return sels, path, true
+			}
+		}
+	}
+	return
+}
+
+// nameOfLast returns the name of the last component
+func nameOfLast(path string) string {
+	for i := len(path) - 1; i >= 0; i-- {
+		if path[i] == '|' || path[i] == '.' {
+			if i > 0 {
+				if path[i-1] == '\\' {
+					continue
+				}
+			}
+			return path[i+1:]
+		}
+	}
+	return path
+}
+
+func isSimpleName(component string) bool {
+	for i := 0; i < len(component); i++ {
+		if component[i] < ' ' {
+			return false
+		}
+		switch component[i] {
+		case '[', ']', '{', '}', '(', ')', '#', '|':
+			return false
+		}
+	}
+	return true
+}
+
+func appendJSONString(dst []byte, s string) []byte {
+	for i := 0; i < len(s); i++ {
+		if s[i] < ' ' || s[i] == '\\' || s[i] == '"' || s[i] > 126 {
+			d, _ := json.Marshal(s)
+			return append(dst, string(d)...)
+		}
+	}
+	dst = append(dst, '"')
+	dst = append(dst, s...)
+	dst = append(dst, '"')
+	return dst
+}
+
+type parseContext struct {
+	json  string
+	value Result
+	pipe  string
+	piped bool
+	calcd bool
+	lines bool
+}
+
+// Get searches json for the specified path.
+// A path is in dot syntax, such as "name.last" or "age".
+// When the value is found it's returned immediately.
+//
+// A path is a series of keys separated by a dot.
+// A key may contain special wildcard characters '*' and '?'.
+// To access an array value use the index as the key.
+// To get the number of elements in an array or to access a child path, use
+// the '#' character.
+// The dot and wildcard character can be escaped with '\'.
+//
+//  {
+//    "name": {"first": "Tom", "last": "Anderson"},
+//    "age":37,
+//    "children": ["Sara","Alex","Jack"],
+//    "friends": [
+//      {"first": "James", "last": "Murphy"},
+//      {"first": "Roger", "last": "Craig"}
+//    ]
+//  }
+//  "name.last"          >> "Anderson"
+//  "age"                >> 37
+//  "children"           >> ["Sara","Alex","Jack"]
+//  "children.#"         >> 3
+//  "children.1"         >> "Alex"
+//  "child*.2"           >> "Jack"
+//  "c?ildren.0"         >> "Sara"
+//  "friends.#.first"    >> ["James","Roger"]
+//
+// This function expects that the json is well-formed, and does not validate.
+// Invalid json will not panic, but it may return unexpected results.
+// If you are consuming JSON from an unpredictable source then you may want to
+// use the Valid function first.
+func Get(json, path string) Result { + if len(path) > 1 { + if !DisableModifiers { + if path[0] == '@' { + // possible modifier + var ok bool + var npath string + var rjson string + npath, rjson, ok = execModifier(json, path) + if ok { + path = npath + if len(path) > 0 && (path[0] == '|' || path[0] == '.') { + res := Get(rjson, path[1:]) + res.Index = 0 + return res + } + return Parse(rjson) + } + } + } + if path[0] == '[' || path[0] == '{' { + // using a subselector path + kind := path[0] + var ok bool + var subs []subSelector + subs, path, ok = parseSubSelectors(path) + if ok { + if len(path) == 0 || (path[0] == '|' || path[0] == '.') { + var b []byte + b = append(b, kind) + var i int + for _, sub := range subs { + res := Get(json, sub.path) + if res.Exists() { + if i > 0 { + b = append(b, ',') + } + if kind == '{' { + if len(sub.name) > 0 { + if sub.name[0] == '"' && Valid(sub.name) { + b = append(b, sub.name...) + } else { + b = appendJSONString(b, sub.name) + } + } else { + last := nameOfLast(sub.path) + if isSimpleName(last) { + b = appendJSONString(b, last) + } else { + b = appendJSONString(b, "_") + } + } + b = append(b, ':') + } + var raw string + if len(res.Raw) == 0 { + raw = res.String() + if len(raw) == 0 { + raw = "null" + } + } else { + raw = res.Raw + } + b = append(b, raw...) + i++ + } + } + b = append(b, kind+2) + var res Result + res.Raw = string(b) + res.Type = JSON + if len(path) > 0 { + res = res.Get(path[1:]) + } + res.Index = 0 + return res + } + } + } + } + + var i int + var c = &parseContext{json: json} + if len(path) >= 2 && path[0] == '.' && path[1] == '.' { + c.lines = true + parseArray(c, 0, path[2:]) + } else { + for ; i < len(c.json); i++ { + if c.json[i] == '{' { + i++ + parseObject(c, i, path) + break + } + if c.json[i] == '[' { + i++ + parseArray(c, i, path) + break + } + } + } + if c.piped { + res := c.value.Get(c.pipe) + res.Index = 0 + return res + } + fillIndex(json, c) + return c.value +} + +// GetBytes searches json for the specified path. 
+// If working with bytes, this method is preferred over Get(string(data), path).
+func GetBytes(json []byte, path string) Result {
+	return getBytes(json, path)
+}
+
+// runeit returns the rune from the \uXXXX escape sequence
+func runeit(json string) rune {
+	n, _ := strconv.ParseUint(json[:4], 16, 64)
+	return rune(n)
+}
+
+// unescape unescapes a string
+func unescape(json string) string { //, error) {
+	var str = make([]byte, 0, len(json))
+	for i := 0; i < len(json); i++ {
+		switch {
+		default:
+			str = append(str, json[i])
+		case json[i] < ' ':
+			return string(str)
+		case json[i] == '\\':
+			i++
+			if i >= len(json) {
+				return string(str)
+			}
+			switch json[i] {
+			default:
+				return string(str)
+			case '\\':
+				str = append(str, '\\')
+			case '/':
+				str = append(str, '/')
+			case 'b':
+				str = append(str, '\b')
+			case 'f':
+				str = append(str, '\f')
+			case 'n':
+				str = append(str, '\n')
+			case 'r':
+				str = append(str, '\r')
+			case 't':
+				str = append(str, '\t')
+			case '"':
+				str = append(str, '"')
+			case 'u':
+				if i+5 > len(json) {
+					return string(str)
+				}
+				r := runeit(json[i+1:])
+				i += 5
+				if utf16.IsSurrogate(r) {
+					// need another code
+					if len(json[i:]) >= 6 && json[i] == '\\' &&
+						json[i+1] == 'u' {
+						// we expect it to be correct so just consume it
+						r = utf16.DecodeRune(r, runeit(json[i+2:]))
+						i += 6
+					}
+				}
+				// provide enough space to encode the largest utf8 possible
+				str = append(str, 0, 0, 0, 0, 0, 0, 0, 0)
+				n := utf8.EncodeRune(str[len(str)-8:], r)
+				str = str[:len(str)-8+n]
+				i-- // backtrack index by one
+			}
+		}
+	}
+	return string(str)
+}
+
+// Less returns true if a token is less than another token.
+// The caseSensitive parameter is used when the tokens are Strings.
+// The order when comparing two different types is:
+//
+//  Null < False < Number < String < True < JSON
+//
+func (t Result) Less(token Result, caseSensitive bool) bool {
+	if t.Type < token.Type {
+		return true
+	}
+	if t.Type > token.Type {
+		return false
+	}
+	if t.Type == String {
+		if caseSensitive {
+			return t.Str < token.Str
+		}
+		return stringLessInsensitive(t.Str, token.Str)
+	}
+	if t.Type == Number {
+		return t.Num < token.Num
+	}
+	return t.Raw < token.Raw
+}
+
+func stringLessInsensitive(a, b string) bool {
+	for i := 0; i < len(a) && i < len(b); i++ {
+		if a[i] >= 'A' && a[i] <= 'Z' {
+			if b[i] >= 'A' && b[i] <= 'Z' {
+				// both are uppercase, do nothing
+				if a[i] < b[i] {
+					return true
+				} else if a[i] > b[i] {
+					return false
+				}
+			} else {
+				// a is uppercase, convert a to lowercase
+				if a[i]+32 < b[i] {
+					return true
+				} else if a[i]+32 > b[i] {
+					return false
+				}
+			}
+		} else if b[i] >= 'A' && b[i] <= 'Z' {
+			// b is uppercase, convert b to lowercase
+			if a[i] < b[i]+32 {
+				return true
+			} else if a[i] > b[i]+32 {
+				return false
+			}
+		} else {
+			// neither are uppercase
+			if a[i] < b[i] {
+				return true
+			} else if a[i] > b[i] {
+				return false
+			}
+		}
+	}
+	return len(a) < len(b)
+}
+
+// parseAny parses the next value from a json string.
+// A Result is returned when the hit param is set.
+// The return values are (i int, res Result, ok bool) +func parseAny(json string, i int, hit bool) (int, Result, bool) { + var res Result + var val string + for ; i < len(json); i++ { + if json[i] == '{' || json[i] == '[' { + i, val = parseSquash(json, i) + if hit { + res.Raw = val + res.Type = JSON + } + return i, res, true + } + if json[i] <= ' ' { + continue + } + switch json[i] { + case '"': + i++ + var vesc bool + var ok bool + i, val, vesc, ok = parseString(json, i) + if !ok { + return i, res, false + } + if hit { + res.Type = String + res.Raw = val + if vesc { + res.Str = unescape(val[1 : len(val)-1]) + } else { + res.Str = val[1 : len(val)-1] + } + } + return i, res, true + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, val = parseNumber(json, i) + if hit { + res.Raw = val + res.Type = Number + res.Num, _ = strconv.ParseFloat(val, 64) + } + return i, res, true + case 't', 'f', 'n': + vc := json[i] + i, val = parseLiteral(json, i) + if hit { + res.Raw = val + switch vc { + case 't': + res.Type = True + case 'f': + res.Type = False + } + return i, res, true + } + } + } + return i, res, false +} + +var ( // used for testing + testWatchForFallback bool + testLastWasFallback bool +) + +// GetMany searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetMany(json string, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = Get(json, path) + } + return res +} + +// GetManyBytes searches json for the multiple paths. +// The return value is a Result array where the number of items +// will be equal to the number of input paths. +func GetManyBytes(json []byte, path ...string) []Result { + res := make([]Result, len(path)) + for i, path := range path { + res[i] = GetBytes(json, path) + } + return res +} + +var fieldsmu sync.RWMutex +var fields = make(map[string]map[string]int) + +func assign(jsval Result, goval reflect.Value) { + if jsval.Type == Null { + return + } + switch goval.Kind() { + default: + case reflect.Ptr: + if !goval.IsNil() { + newval := reflect.New(goval.Elem().Type()) + assign(jsval, newval.Elem()) + goval.Elem().Set(newval.Elem()) + } else { + newval := reflect.New(goval.Type().Elem()) + assign(jsval, newval.Elem()) + goval.Set(newval) + } + case reflect.Struct: + fieldsmu.RLock() + sf := fields[goval.Type().String()] + fieldsmu.RUnlock() + if sf == nil { + fieldsmu.Lock() + sf = make(map[string]int) + for i := 0; i < goval.Type().NumField(); i++ { + f := goval.Type().Field(i) + tag := strings.Split(f.Tag.Get("json"), ",")[0] + if tag != "-" { + if tag != "" { + sf[tag] = i + sf[f.Name] = i + } else { + sf[f.Name] = i + } + } + } + fields[goval.Type().String()] = sf + fieldsmu.Unlock() + } + jsval.ForEach(func(key, value Result) bool { + if idx, ok := sf[key.Str]; ok { + f := goval.Field(idx) + if f.CanSet() { + assign(value, f) + } + } + return true + }) + case reflect.Slice: + if goval.Type().Elem().Kind() == reflect.Uint8 && + jsval.Type == String { + data, _ := base64.StdEncoding.DecodeString(jsval.String()) + goval.Set(reflect.ValueOf(data)) + } else { + jsvals := jsval.Array() + slice := reflect.MakeSlice(goval.Type(), len(jsvals), len(jsvals)) + for i := 0; i < len(jsvals); i++ { + assign(jsvals[i], slice.Index(i)) + } + goval.Set(slice) + } + case reflect.Array: + i, n := 0, goval.Len() + jsval.ForEach(func(_, value Result) bool { + if i == n { + return false + } + assign(value, goval.Index(i)) + 
i++ + return true + }) + case reflect.Map: + if goval.Type().Key().Kind() == reflect.String && + goval.Type().Elem().Kind() == reflect.Interface { + goval.Set(reflect.ValueOf(jsval.Value())) + } + case reflect.Interface: + goval.Set(reflect.ValueOf(jsval.Value())) + case reflect.Bool: + goval.SetBool(jsval.Bool()) + case reflect.Float32, reflect.Float64: + goval.SetFloat(jsval.Float()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + goval.SetInt(jsval.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + goval.SetUint(jsval.Uint()) + case reflect.String: + goval.SetString(jsval.String()) + } + if len(goval.Type().PkgPath()) > 0 { + v := goval.Addr() + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + u.UnmarshalJSON([]byte(jsval.Raw)) + } + } + } +} + +var validate uintptr = 1 + +// UnmarshalValidationEnabled provides the option to disable JSON validation +// during the Unmarshal routine. Validation is enabled by default. +// +// Deprecated: Use encoder/json.Unmarshal instead +func UnmarshalValidationEnabled(enabled bool) { + if enabled { + atomic.StoreUintptr(&validate, 1) + } else { + atomic.StoreUintptr(&validate, 0) + } +} + +// Unmarshal loads the JSON data into the value pointed to by v. +// +// This function works almost identically to json.Unmarshal except that +// gjson.Unmarshal will automatically attempt to convert JSON values to any Go +// type. For example, the JSON string "100" or the JSON number 100 can be +// equally assigned to Go string, int, byte, uint64, etc. This rule applies to +// all types. +// +// Deprecated: Use encoder/json.Unmarshal instead +func Unmarshal(data []byte, v interface{}) error { + if atomic.LoadUintptr(&validate) == 1 { + _, ok := validpayload(data, 0) + if !ok { + return errors.New("invalid json") + } + } + if v := reflect.ValueOf(v); v.Kind() == reflect.Ptr { + assign(ParseBytes(data), v) + } + return nil +} + +func validpayload(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + i, ok = validany(data, i) + if !ok { + return i, false + } + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + } + } + return i, true + case ' ', '\t', '\n', '\r': + continue + } + } + return i, false +} +func validany(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '{': + return validobject(data, i+1) + case '[': + return validarray(data, i+1) + case '"': + return validstring(data, i+1) + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return validnumber(data, i+1) + case 't': + return validtrue(data, i+1) + case 'f': + return validfalse(data, i+1) + case 'n': + return validnull(data, i+1) + } + } + return i, false +} +func validobject(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case '}': + return i + 1, true + case '"': + key: + if i, ok = validstring(data, i+1); !ok { + return i, false + } + if i, ok = validcolon(data, i); !ok { + return i, false + } + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, '}'); !ok { + return i, false + } + if data[i] == '}' { + return i + 1, true + } + i++ + for ; i < len(data); i++ { + switch data[i] { + default: + return i, 
false + case ' ', '\t', '\n', '\r': + continue + case '"': + goto key + } + } + return i, false + } + } + return i, false +} +func validcolon(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ':': + return i + 1, true + } + } + return i, false +} +func validcomma(data []byte, i int, end byte) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + return i, false + case ' ', '\t', '\n', '\r': + continue + case ',': + return i, true + case end: + return i, true + } + } + return i, false +} +func validarray(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + switch data[i] { + default: + for ; i < len(data); i++ { + if i, ok = validany(data, i); !ok { + return i, false + } + if i, ok = validcomma(data, i, ']'); !ok { + return i, false + } + if data[i] == ']' { + return i + 1, true + } + } + case ' ', '\t', '\n', '\r': + continue + case ']': + return i + 1, true + } + } + return i, false +} +func validstring(data []byte, i int) (outi int, ok bool) { + for ; i < len(data); i++ { + if data[i] < ' ' { + return i, false + } else if data[i] == '\\' { + i++ + if i == len(data) { + return i, false + } + switch data[i] { + default: + return i, false + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + case 'u': + for j := 0; j < 4; j++ { + i++ + if i >= len(data) { + return i, false + } + if !((data[i] >= '0' && data[i] <= '9') || + (data[i] >= 'a' && data[i] <= 'f') || + (data[i] >= 'A' && data[i] <= 'F')) { + return i, false + } + } + } + } else if data[i] == '"' { + return i + 1, true + } + } + return i, false +} +func validnumber(data []byte, i int) (outi int, ok bool) { + i-- + // sign + if data[i] == '-' { + i++ + } + // int + if i == len(data) { + return i, false + } + if data[i] == '0' { + i++ + } else { + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // frac + if i == len(data) { + return i, true + } + if data[i] == '.' { + i++ + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + // exp + if i == len(data) { + return i, true + } + if data[i] == 'e' || data[i] == 'E' { + i++ + if i == len(data) { + return i, false + } + if data[i] == '+' || data[i] == '-' { + i++ + } + if i == len(data) { + return i, false + } + if data[i] < '0' || data[i] > '9' { + return i, false + } + i++ + for ; i < len(data); i++ { + if data[i] >= '0' && data[i] <= '9' { + continue + } + break + } + } + return i, true +} + +func validtrue(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' && + data[i+2] == 'e' { + return i + 3, true + } + return i, false +} +func validfalse(data []byte, i int) (outi int, ok bool) { + if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' && + data[i+2] == 's' && data[i+3] == 'e' { + return i + 4, true + } + return i, false +} +func validnull(data []byte, i int) (outi int, ok bool) { + if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' && + data[i+2] == 'l' { + return i + 3, true + } + return i, false +} + +// Valid returns true if the input is valid json. 
+//
+//  if !gjson.Valid(json) {
+//      return errors.New("invalid json")
+//  }
+//  value := gjson.Get(json, "name.last")
+//
+func Valid(json string) bool {
+	_, ok := validpayload(stringBytes(json), 0)
+	return ok
+}
+
+// ValidBytes returns true if the input is valid json.
+//
+//  if !gjson.ValidBytes(json) {
+//      return errors.New("invalid json")
+//  }
+//  value := gjson.GetBytes(json, "name.last")
+//
+// If working with bytes, this method is preferred over Valid(string(data)).
+//
+func ValidBytes(json []byte) bool {
+	_, ok := validpayload(json, 0)
+	return ok
+}
+
+func parseUint(s string) (n uint64, ok bool) {
+	var i int
+	if i == len(s) {
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + uint64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	return n, true
+}
+
+func parseInt(s string) (n int64, ok bool) {
+	var i int
+	var sign bool
+	if len(s) > 0 && s[0] == '-' {
+		sign = true
+		i++
+	}
+	if i == len(s) {
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + int64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	if sign {
+		return n * -1, true
+	}
+	return n, true
+}
+
+const minUint53 = 0
+const maxUint53 = 4503599627370495
+const minInt53 = -2251799813685248
+const maxInt53 = 2251799813685247
+
+func floatToUint(f float64) (n uint64, ok bool) {
+	n = uint64(f)
+	if float64(n) == f && n >= minUint53 && n <= maxUint53 {
+		return n, true
+	}
+	return 0, false
+}
+
+func floatToInt(f float64) (n int64, ok bool) {
+	n = int64(f)
+	if float64(n) == f && n >= minInt53 && n <= maxInt53 {
+		return n, true
+	}
+	return 0, false
+}
+
+// execModifier parses the path to find a matching modifier function.
+// The input expects that the path already starts with a '@'.
+func execModifier(json, path string) (pathOut, res string, ok bool) {
+	name := path[1:]
+	var hasArgs bool
+	for i := 1; i < len(path); i++ {
+		if path[i] == ':' {
+			pathOut = path[i+1:]
+			name = path[1:i]
+			hasArgs = len(pathOut) > 0
+			break
+		}
+		if path[i] == '|' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+		if path[i] == '.' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+	}
+	if fn, ok := modifiers[name]; ok {
+		var args string
+		if hasArgs {
+			var parsedArgs bool
+			switch pathOut[0] {
+			case '{', '[', '"':
+				res := Parse(pathOut)
+				if res.Exists() {
+					_, args = parseSquash(pathOut, 0)
+					pathOut = pathOut[len(args):]
+					parsedArgs = true
+				}
+			}
+			if !parsedArgs {
+				idx := strings.IndexByte(pathOut, '|')
+				if idx == -1 {
+					args = pathOut
+					pathOut = ""
+				} else {
+					args = pathOut[:idx]
+					pathOut = pathOut[idx:]
+				}
+			}
+		}
+		return pathOut, fn(json, args), true
+	}
+	return pathOut, res, false
+}
+
+// DisableModifiers will disable the modifier syntax
+var DisableModifiers = false
+
+var modifiers = map[string]func(json, arg string) string{
+	"pretty":  modPretty,
+	"ugly":    modUgly,
+	"reverse": modReverse,
+}
+
+// AddModifier binds a custom modifier command to the GJSON syntax.
+// This operation is not thread safe and should be executed prior to
+// using all other gjson functions.
+func AddModifier(name string, fn func(json, arg string) string) {
+	modifiers[name] = fn
+}
+
+// ModifierExists returns true when the specified modifier exists.
+func ModifierExists(name string, fn func(json, arg string) string) bool {
+	_, ok := modifiers[name]
+	return ok
+}
+
+// @pretty modifier makes the json look nice.
+func modPretty(json, arg string) string { + if len(arg) > 0 { + opts := *pretty.DefaultOptions + Parse(arg).ForEach(func(key, value Result) bool { + switch key.String() { + case "sortKeys": + opts.SortKeys = value.Bool() + case "indent": + opts.Indent = value.String() + case "prefix": + opts.Prefix = value.String() + case "width": + opts.Width = int(value.Int()) + } + return true + }) + return bytesString(pretty.PrettyOptions(stringBytes(json), &opts)) + } + return bytesString(pretty.Pretty(stringBytes(json))) +} + +// @ugly modifier removes all whitespace. +func modUgly(json, arg string) string { + return bytesString(pretty.Ugly(stringBytes(json))) +} + +// @reverse reverses array elements or root object members. +func modReverse(json, arg string) string { + res := Parse(json) + if res.IsArray() { + var values []Result + res.ForEach(func(_, value Result) bool { + values = append(values, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '[') + for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, values[i].Raw...) + } + out = append(out, ']') + return bytesString(out) + } + if res.IsObject() { + var keyValues []Result + res.ForEach(func(key, value Result) bool { + keyValues = append(keyValues, key, value) + return true + }) + out := make([]byte, 0, len(json)) + out = append(out, '{') + for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 { + if j > 0 { + out = append(out, ',') + } + out = append(out, keyValues[i+0].Raw...) + out = append(out, ':') + out = append(out, keyValues[i+1].Raw...) + } + out = append(out, '}') + return bytesString(out) + } + return json +} diff --git a/vendor/github.com/tidwall/gjson/gjson_gae.go b/vendor/github.com/tidwall/gjson/gjson_gae.go new file mode 100644 index 0000000..9586903 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson_gae.go @@ -0,0 +1,18 @@ +//+build appengine js + +package gjson + +func getBytes(json []byte, path string) Result { + return Get(string(json), path) +} +func fillIndex(json string, c *parseContext) { + // noop. Use zero for the Index value. +} + +func stringBytes(s string) []byte { + return []byte(s) +} + +func bytesString(b []byte) string { + return string(b) +} diff --git a/vendor/github.com/tidwall/gjson/gjson_ngae.go b/vendor/github.com/tidwall/gjson/gjson_ngae.go new file mode 100644 index 0000000..bc608b5 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/gjson_ngae.go @@ -0,0 +1,81 @@ +//+build !appengine +//+build !js + +package gjson + +import ( + "reflect" + "unsafe" +) + +// getBytes casts the input json bytes to a string and safely returns the +// results as uniquely allocated data. This operation is intended to minimize +// copies and allocations for the large json string->[]byte. 
+func getBytes(json []byte, path string) Result { + var result Result + if json != nil { + // unsafe cast to string + result = Get(*(*string)(unsafe.Pointer(&json)), path) + // safely get the string headers + rawhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Raw)) + strhi := *(*reflect.StringHeader)(unsafe.Pointer(&result.Str)) + // create byte slice headers + rawh := reflect.SliceHeader{Data: rawhi.Data, Len: rawhi.Len} + strh := reflect.SliceHeader{Data: strhi.Data, Len: strhi.Len} + if strh.Data == 0 { + // str is nil + if rawh.Data == 0 { + // raw is nil + result.Raw = "" + } else { + // raw has data, safely copy the slice header to a string + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + } + result.Str = "" + } else if rawh.Data == 0 { + // raw is nil + result.Raw = "" + // str has data, safely copy the slice header to a string + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } else if strh.Data >= rawh.Data && + int(strh.Data)+strh.Len <= int(rawh.Data)+rawh.Len { + // Str is a substring of Raw. + start := int(strh.Data - rawh.Data) + // safely copy the raw slice header + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + // substring the raw + result.Str = result.Raw[start : start+strh.Len] + } else { + // safely copy both the raw and str slice headers to strings + result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh))) + result.Str = string(*(*[]byte)(unsafe.Pointer(&strh))) + } + } + return result +} + +// fillIndex finds the position of Raw data and assigns it to the Index field +// of the resulting value. If the position cannot be found then Index zero is +// used instead. +func fillIndex(json string, c *parseContext) { + if len(c.value.Raw) > 0 && !c.calcd { + jhdr := *(*reflect.StringHeader)(unsafe.Pointer(&json)) + rhdr := *(*reflect.StringHeader)(unsafe.Pointer(&(c.value.Raw))) + c.value.Index = int(rhdr.Data - jhdr.Data) + if c.value.Index < 0 || c.value.Index >= len(json) { + c.value.Index = 0 + } + } +} + +func stringBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Data: (*reflect.StringHeader)(unsafe.Pointer(&s)).Data, + Len: len(s), + Cap: len(s), + })) +} + +func bytesString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/tidwall/gjson/go.mod b/vendor/github.com/tidwall/gjson/go.mod new file mode 100644 index 0000000..d851688 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/go.mod @@ -0,0 +1,8 @@ +module github.com/tidwall/gjson + +go 1.12 + +require ( + github.com/tidwall/match v1.0.1 + github.com/tidwall/pretty v1.0.0 +) diff --git a/vendor/github.com/tidwall/gjson/go.sum b/vendor/github.com/tidwall/gjson/go.sum new file mode 100644 index 0000000..a4a2d87 --- /dev/null +++ b/vendor/github.com/tidwall/gjson/go.sum @@ -0,0 +1,4 @@ +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png new file mode 100644 index 0000000..17a8bbe Binary files /dev/null and b/vendor/github.com/tidwall/gjson/logo.png differ diff --git a/vendor/github.com/tidwall/grect/LICENSE.md b/vendor/github.com/tidwall/grect/LICENSE.md new file mode 100644 index 0000000..58f5819 --- /dev/null +++ 
b/vendor/github.com/tidwall/grect/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tidwall/grect/README.md b/vendor/github.com/tidwall/grect/README.md new file mode 100644 index 0000000..04a8bf0 --- /dev/null +++ b/vendor/github.com/tidwall/grect/README.md @@ -0,0 +1,25 @@ +GRECT +==== + +Quickly get the outer rectangle for GeoJSON, WKT, WKB. + +```go + r := grect.Get(`{ + "type": "Polygon", + "coordinates": [ + [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0], + [100.0, 1.0], [100.0, 0.0] ] + ] + }`) + fmt.Printf("%v %v\n", r.Min, r.Max) + // Output: + // [100 0] [101 1] +``` + +## Contact +Josh Baker [@tidwall](http://twitter.com/tidwall) + +## License + +GRECT source code is available under the MIT [License](/LICENSE). + diff --git a/vendor/github.com/tidwall/grect/grect.go b/vendor/github.com/tidwall/grect/grect.go new file mode 100644 index 0000000..13eb761 --- /dev/null +++ b/vendor/github.com/tidwall/grect/grect.go @@ -0,0 +1,337 @@ +package grect + +import ( + "strconv" + "strings" + + "github.com/tidwall/gjson" +) + +type Rect struct { + Min, Max []float64 +} + +func (r Rect) String() string { + diff := len(r.Min) != len(r.Max) + if !diff { + for i := 0; i < len(r.Min); i++ { + if r.Min[i] != r.Max[i] { + diff = true + break + } + } + } + var buf []byte + buf = append(buf, '[') + for i, v := range r.Min { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + if diff { + buf = append(buf, ']', ',', '[') + for i, v := range r.Max { + if i > 0 { + buf = append(buf, ' ') + } + buf = append(buf, strconv.FormatFloat(v, 'f', -1, 64)...) + } + } + buf = append(buf, ']') + return string(buf) +} + +func normalize(min, max []float64) (nmin, nmax []float64) { + if len(max) == 0 { + return min, min + } else if len(max) != len(min) { + if len(max) < len(min) { + max = append(max, min[len(max):]...) + } else if len(min) < len(max) { + min = append(min, max[len(min):]...) 
+ } + } + match := true + for i := 0; i < len(min); i++ { + if min[i] != max[i] { + if match { + match = false + } + if min[i] > max[i] { + min[i], max[i] = max[i], min[i] + } + } + } + if match { + return min, min + } + return min, max +} + +func Get(s string) Rect { + var i int + var ws bool + var min, max []float64 + for ; i < len(s); i++ { + switch s[i] { + default: + continue + case ' ', '\t', '\r', '\n': + ws = true + continue + case '[': + min, max, i = getRect(s, i) + case '{': + min, max, i = getGeoJSON(s, i) + case 0x00, 0x01: + if !ws { + // return parseWKB(s, i) + } + case 'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + min, max, i = getWKT(s, i) + } + break + } + min, max = normalize(min, max) + return Rect{Min: min, Max: max} +} + +func getRect(s string, i int) (min, max []float64, ri int) { + a := s[i:] + parts := strings.Split(a, ",") + for i := 0; i < len(parts) && i < 2; i++ { + part := parts[i] + if len(part) > 0 && (part[0] <= ' ' || part[len(part)-1] <= ' ') { + part = strings.TrimSpace(part) + } + if len(part) >= 2 && part[0] == '[' && part[len(part)-1] == ']' { + pieces := strings.Split(part[1:len(part)-1], " ") + if i == 0 { + min = make([]float64, 0, len(pieces)) + } else { + max = make([]float64, 0, len(pieces)) + } + for j := 0; j < len(pieces); j++ { + piece := pieces[j] + if piece != "" { + n, _ := strconv.ParseFloat(piece, 64) + if i == 0 { + min = append(min, n) + } else { + max = append(max, n) + } + } + } + } + } + + // normalize + if len(parts) == 1 { + max = min + } else { + min, max = normalize(min, max) + } + + return min, max, len(s) +} + +func union(min1, max1, min2, max2 []float64) (umin, umax []float64) { + for i := 0; i < len(min1) || i < len(min2); i++ { + if i >= len(min1) { + // just copy min2 + umin = append(umin, min2[i]) + umax = append(umax, max2[i]) + } else if i >= len(min2) { + // just copy min1 + umin = append(umin, min1[i]) + umax = append(umax, max1[i]) + } else { + if min1[i] < min2[i] { + umin = append(umin, min1[i]) + } else { + umin = append(umin, min2[i]) + } + if max1[i] > max2[i] { + umax = append(umax, max1[i]) + } else { + umax = append(umax, max2[i]) + } + } + } + return umin, umax +} + +func getWKT(s string, i int) (min, max []float64, ri int) { + switch s[i] { + default: + for ; i < len(s); i++ { + if s[i] == ',' { + return nil, nil, i + } + if s[i] == '(' { + return getWKTAny(s, i) + } + } + return nil, nil, i + case 'g', 'G': + if len(s)-i < 18 { + return nil, nil, i + } + return getWKTGeometryCollection(s, i+18) + } +} + +func getWKTAny(s string, i int) (min, max []float64, ri int) { + min, max = make([]float64, 0, 4), make([]float64, 0, 4) + var depth int + var ni int + var idx int +loop: + for ; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '(': + depth++ + case ')', ' ', '\t', '\r', '\n', ',': + if ni != 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + idx++ + ni = 0 + } + switch s[i] { + case ')': + idx = 0 + depth-- + if depth == 0 { + i++ + break loop + } + case ',': + idx = 0 + } + } + } + return min, max, i +} + +func getWKTGeometryCollection(s string, i int) (min, max []float64, ri int) { + var depth int + for ; i < len(s); i++ { + if s[i] == ',' || s[i] == ')' { + // do not increment the index + return nil, nil, i + } + if s[i] == '(' { + depth++ + i++ + break + } + } +next: + for ; i < len(s); i++ { + switch s[i] { + case 
'p', 'P', 'l', 'L', 'm', 'M', 'g', 'G': + var min2, max2 []float64 + min2, max2, i = getWKT(s, i) + min, max = union(min, max, min2, max2) + for ; i < len(s); i++ { + if s[i] == ',' { + i++ + goto next + } + if s[i] == ')' { + i++ + goto done + } + } + case ' ', '\t', '\r', '\n': + continue + default: + goto end_early + } + } +end_early: + // just balance the parens + for ; i < len(s); i++ { + if s[i] == '(' { + depth++ + } else if s[i] == ')' { + depth-- + if depth == 0 { + i++ + break + } + } + } +done: + return min, max, i +} +func getGeoJSON(s string, i int) (min, max []float64, ri int) { + json := s[i:] + switch gjson.Get(json, "type").String() { + default: + min, max = getMinMaxBrackets(gjson.Get(json, "coordinates").Raw) + case "Feature": + min, max, _ = getGeoJSON(gjson.Get(json, "geometry").String(), 0) + case "FeatureCollection": + for _, json := range gjson.Get(json, "features").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + case "GeometryCollection": + for _, json := range gjson.Get(json, "geometries").Array() { + nmin, nmax, _ := getGeoJSON(json.String(), 0) + min, max = union(min, max, nmin, nmax) + } + } + return min, max, len(json) +} + +func getMinMaxBrackets(s string) (min, max []float64) { + var ni int + var idx int + for i := 0; i < len(s); i++ { + switch s[i] { + default: + if ni == 0 { + ni = i + } + case '[', ',', ']', ' ', '\t', '\r', '\n': + if ni > 0 { + n, _ := strconv.ParseFloat(s[ni:i], 64) + if idx >= len(min) { + min = append(min, n) + max = append(max, n) + } else { + if n < min[idx] { + min[idx] = n + } else if n > max[idx] { + max[idx] = n + } + } + ni = 0 + idx++ + } + if s[i] == ']' { + idx = 0 + } + + } + } + + return +} diff --git a/vendor/github.com/tidwall/match/.travis.yml b/vendor/github.com/tidwall/match/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/tidwall/match/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE new file mode 100644 index 0000000..58f5819 --- /dev/null +++ b/vendor/github.com/tidwall/match/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
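To make the vendored grect parser above concrete, here is a minimal usage sketch. It is not part of the vendored files; the WKT input and the expected bounds are illustrative assumptions, traced against the `getWKT`/`getWKTAny` code shown above.

```go
package main

import (
	"fmt"

	"github.com/tidwall/grect"
)

func main() {
	// A hypothetical WKT input; grect.Get dispatches on the leading 'L'
	// to the WKT parser, which tracks per-dimension min/max values.
	r := grect.Get("LINESTRING (30 10, 10 30, 40 40)")
	fmt.Println(r.Min, r.Max) // expected: [10 10] [40 40]
}
```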
diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md
new file mode 100644
index 0000000..2aa5bc3
--- /dev/null
+++ b/vendor/github.com/tidwall/match/README.md
@@ -0,0 +1,32 @@
+Match
+=====
+Build Status
+GoDoc
+
+Match is a very simple pattern matcher where '*' matches on any
+number of characters and '?' matches on any one character.
+
+Installing
+----------
+
+```
+go get -u github.com/tidwall/match
+```
+
+Example
+-------
+
+```go
+match.Match("hello", "*llo")
+match.Match("jello", "?ello")
+match.Match("hello", "h*o")
+```
+
+
+Contact
+-------
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+License
+-------
+Match source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go
new file mode 100644
index 0000000..fcfe998
--- /dev/null
+++ b/vendor/github.com/tidwall/match/match.go
@@ -0,0 +1,181 @@
+// Match provides a simple pattern matcher with unicode support.
+package match
+
+import "unicode/utf8"
+
+// Match returns true if str matches pattern. This is a very
+// simple wildcard match where '*' matches on any number of characters
+// and '?' matches on any one character.
+//
+// pattern:
+//  { term }
+// term:
+//  '*'         matches any sequence of non-Separator characters
+//  '?'         matches any single non-Separator character
+//  c           matches character c (c != '*', '?', '\\')
+//  '\\' c      matches character c
+//
+func Match(str, pattern string) bool {
+	if pattern == "*" {
+		return true
+	}
+	return deepMatch(str, pattern)
+}
+func deepMatch(str, pattern string) bool {
+	for len(pattern) > 0 {
+		if pattern[0] > 0x7f {
+			return deepMatchRune(str, pattern)
+		}
+		switch pattern[0] {
+		default:
+			if len(str) == 0 {
+				return false
+			}
+			if str[0] > 0x7f {
+				return deepMatchRune(str, pattern)
+			}
+			if str[0] != pattern[0] {
+				return false
+			}
+		case '?':
+			if len(str) == 0 {
+				return false
+			}
+		case '*':
+			return deepMatch(str, pattern[1:]) ||
+				(len(str) > 0 && deepMatch(str[1:], pattern))
+		}
+		str = str[1:]
+		pattern = pattern[1:]
+	}
+	return len(str) == 0 && len(pattern) == 0
+}
+
+func deepMatchRune(str, pattern string) bool {
+	var sr, pr rune
+	var srsz, prsz int
+
+	// read the first rune ahead of time
+	if len(str) > 0 {
+		if str[0] > 0x7f {
+			sr, srsz = utf8.DecodeRuneInString(str)
+		} else {
+			sr, srsz = rune(str[0]), 1
+		}
+	} else {
+		sr, srsz = utf8.RuneError, 0
+	}
+	if len(pattern) > 0 {
+		if pattern[0] > 0x7f {
+			pr, prsz = utf8.DecodeRuneInString(pattern)
+		} else {
+			pr, prsz = rune(pattern[0]), 1
+		}
+	} else {
+		pr, prsz = utf8.RuneError, 0
+	}
+	// done reading
+	for pr != utf8.RuneError {
+		switch pr {
+		default:
+			if srsz == utf8.RuneError {
+				return false
+			}
+			if sr != pr {
+				return false
+			}
+		case '?':
+			if srsz == utf8.RuneError {
+				return false
+			}
+		case '*':
+			return deepMatchRune(str, pattern[prsz:]) ||
+				(srsz > 0 && deepMatchRune(str[srsz:], pattern))
+		}
+		str = str[srsz:]
+		pattern = pattern[prsz:]
+		// read the next runes
+		if len(str) > 0 {
+			if str[0] > 0x7f {
+				sr, srsz = utf8.DecodeRuneInString(str)
+			} else {
+				sr, srsz = rune(str[0]), 1
+			}
+		} else {
+			sr, srsz = utf8.RuneError, 0
+		}
+		if len(pattern) > 0 {
+			if pattern[0] > 0x7f {
+				pr, prsz = utf8.DecodeRuneInString(pattern)
+			} else {
+				pr, prsz = rune(pattern[0]), 1
+			}
+		} else {
+			pr, prsz = utf8.RuneError, 0
+		}
+		// done reading
+	}
+
+	return srsz == 0 && prsz == 0
+}
+
+var maxRuneBytes = func() []byte {
+	b := make([]byte, 4)
+	if utf8.EncodeRune(b, '\U0010FFFF') != 4 {
+		panic("invalid rune encoding")
+	}
+	return b
+}()
+
+// Allowable parses the pattern and determines the minimum and maximum
+// allowable values that the pattern can represent.
+// When the pattern is empty or starts with '*', both values are returned
+// as empty strings.
+func Allowable(pattern string) (min, max string) {
+	if pattern == "" || pattern[0] == '*' {
+		return "", ""
+	}
+
+	minb := make([]byte, 0, len(pattern))
+	maxb := make([]byte, 0, len(pattern))
+	var wild bool
+	for i := 0; i < len(pattern); i++ {
+		if pattern[i] == '*' {
+			wild = true
+			break
+		}
+		if pattern[i] == '?' {
+			minb = append(minb, 0)
+			maxb = append(maxb, maxRuneBytes...)
+		} else {
+			minb = append(minb, pattern[i])
+			maxb = append(maxb, pattern[i])
+		}
+	}
+	if wild {
+		r, n := utf8.DecodeLastRune(maxb)
+		if r != utf8.RuneError {
+			if r < utf8.MaxRune {
+				r++
+				if r > 0x7f {
+					b := make([]byte, 4)
+					nn := utf8.EncodeRune(b, r)
+					maxb = append(maxb[:len(maxb)-n], b[:nn]...)
+				} else {
+					maxb = append(maxb[:len(maxb)-n], byte(r))
+				}
+			}
+		}
+	}
+	return string(minb), string(maxb)
+}
+
+// IsPattern returns true if the string is a pattern.
+func IsPattern(str string) bool {
+	for i := 0; i < len(str); i++ {
+		if str[i] == '*' || str[i] == '?' {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/tidwall/pretty/.travis.yml b/vendor/github.com/tidwall/pretty/.travis.yml
new file mode 100644
index 0000000..4f2ee4d
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE
new file mode 100644
index 0000000..993b83f
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
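A short sketch of the match package vendored above (Match, IsPattern, and the Allowable range helper). It is not part of the vendored files; the inputs and expected outputs are illustrative, derived by tracing the code rather than taken from upstream docs.

```go
package main

import (
	"fmt"

	"github.com/tidwall/match"
)

func main() {
	fmt.Println(match.Match("hello", "h*o")) // true
	fmt.Println(match.IsPattern("h*o"))      // true

	// Allowable reports the smallest and largest strings that could
	// possibly match the pattern, which is useful for bounding scans.
	min, max := match.Allowable("hel*")
	fmt.Println(min, max) // expected: hel hem
}
```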
diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md new file mode 100644 index 0000000..d2b8864 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/README.md @@ -0,0 +1,124 @@
+# Pretty
+[![Build Status](https://img.shields.io/travis/tidwall/pretty.svg?style=flat-square)](https://travis-ci.org/tidwall/pretty)
+[![Coverage Status](https://img.shields.io/badge/coverage-100%25-brightgreen.svg?style=flat-square)](http://gocover.io/github.com/tidwall/pretty)
+[![GoDoc](https://img.shields.io/badge/api-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/tidwall/pretty)
+
+Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or for compacting JSON into smaller payloads.
+
+Getting Started
+===============
+
+## Installing
+
+To start using Pretty, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/pretty
+```
+
+This will retrieve the library.
+
+## Pretty
+
+Using this example:
+
+```json
+{"name": {"first":"Tom","last":"Anderson"}, "age":37,
+"children": ["Sara","Alex","Jack"],
+"fav.movie": "Deer Hunter", "friends": [
+    {"first": "Janet", "last": "Murphy", "age": 44}
+  ]}
+```
+
+The following code:
+
+```go
+result = pretty.Pretty(example)
+```
+
+Will format the JSON to:
+
+```json
+{
+  "name": {
+    "first": "Tom",
+    "last": "Anderson"
+  },
+  "age": 37,
+  "children": ["Sara", "Alex", "Jack"],
+  "fav.movie": "Deer Hunter",
+  "friends": [
+    {
+      "first": "Janet",
+      "last": "Murphy",
+      "age": 44
+    }
+  ]
+}
```

+## Color
+
+Color will colorize the JSON for outputting to the screen.
+
+```go
+result = pretty.Color(json, nil)
+```
+
+This adds color to the result for printing to the terminal.
+The second param customizes the style; passing nil uses the default `pretty.TerminalStyle`.
+
+## Ugly
+
+The following code:
+
+```go
+result = pretty.Ugly(example)
+```
+
+Will compact the JSON to:
+
+```json
+{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}
+```
+
+## Customized output
+
+There's a `PrettyOptions(json, opts)` function which allows customizing the output with the following options:
+
+```go
+type Options struct {
+	// Width is the maximum column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+```
+
+## Performance
+
+Benchmarks of Pretty alongside the built-in `encoding/json` Indent/Compact methods.
+```
+BenchmarkPretty-8        1000000    1283 ns/op     720 B/op    2 allocs/op
+BenchmarkUgly-8          3000000     426 ns/op     240 B/op    1 allocs/op
+BenchmarkUglyInPlace-8   5000000     340 ns/op       0 B/op    0 allocs/op
+BenchmarkJSONIndent-8     300000    4628 ns/op    1069 B/op    4 allocs/op
+BenchmarkJSONCompact-8   1000000    2469 ns/op     758 B/op    4 allocs/op
+```
+
+*These benchmarks were run on a MacBook Pro 15" 2.8 GHz Intel Core i7 using Go 1.7.*
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Pretty source code is available under the MIT [License](/LICENSE).
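Supplementing the README above, a minimal sketch (not part of the diff) that exercises `PrettyOptions` and the `Ugly` round-trip; the sample JSON is made up:

```go
package main

import (
	"fmt"

	"github.com/tidwall/pretty"
)

func main() {
	json := []byte(`{"b":1,"a":{"y":true,"x":null}}`)

	// Indent with tabs and sort object keys alphabetically.
	opts := &pretty.Options{Width: 80, Indent: "\t", SortKeys: true}
	fmt.Printf("%s", pretty.PrettyOptions(json, opts))

	// Round-trip: compact the pretty output back to a single line.
	fmt.Printf("%s\n", pretty.Ugly(pretty.Pretty(json)))
}
```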
+
diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go new file mode 100644 index 0000000..0a922d0 --- /dev/null +++ b/vendor/github.com/tidwall/pretty/pretty.go @@ -0,0 +1,432 @@
+package pretty
+
+import (
+	"sort"
+)
+
+// Options is Pretty options
+type Options struct {
+	// Width is the maximum column width for single line arrays
+	// Default is 80
+	Width int
+	// Prefix is a prefix for all lines
+	// Default is an empty string
+	Prefix string
+	// Indent is the nested indentation
+	// Default is two spaces
+	Indent string
+	// SortKeys will sort the keys alphabetically
+	// Default is false
+	SortKeys bool
+}
+
+// DefaultOptions is the default options for pretty formats.
+var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: "  ", SortKeys: false}
+
+// Pretty converts the input json into a more human-readable format where each
+// element is on its own line with clear indentation.
+func Pretty(json []byte) []byte { return PrettyOptions(json, nil) }
+
+// PrettyOptions is like Pretty but with customized options.
+func PrettyOptions(json []byte, opts *Options) []byte {
+	if opts == nil {
+		opts = DefaultOptions
+	}
+	buf := make([]byte, 0, len(json))
+	if len(opts.Prefix) != 0 {
+		buf = append(buf, opts.Prefix...)
+	}
+	buf, _, _, _ = appendPrettyAny(buf, json, 0, true,
+		opts.Width, opts.Prefix, opts.Indent, opts.SortKeys,
+		0, 0, -1)
+	if len(buf) > 0 {
+		buf = append(buf, '\n')
+	}
+	return buf
+}
+
+// Ugly removes insignificant space characters from the input json byte slice
+// and returns the compacted result.
+func Ugly(json []byte) []byte {
+	buf := make([]byte, 0, len(json))
+	return ugly(buf, json)
+}
+
+// UglyInPlace removes insignificant space characters from the input json
+// byte slice and returns the compacted result. This method reuses the
+// input json buffer to avoid allocations. Do not use the original byte
+// slice upon return.
+func UglyInPlace(json []byte) []byte { return ugly(json, json) } + +func ugly(dst, src []byte) []byte { + dst = dst[:0] + for i := 0; i < len(src); i++ { + if src[i] > ' ' { + dst = append(dst, src[i]) + if src[i] == '"' { + for i = i + 1; i < len(src); i++ { + dst = append(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + } + } + } + return dst +} + +func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == '"' { + return appendPrettyString(buf, json, i, nl) + } + if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' { + return appendPrettyNumber(buf, json, i, nl) + } + if json[i] == '{' { + return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + if json[i] == '[' { + return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max) + } + switch json[i] { + case 't': + return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true + case 'f': + return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true + case 'n': + return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true + } + } + return buf, i, nl, true +} + +type pair struct { + kstart, kend int + vstart, vend int +} + +type byKey struct { + sorted bool + json []byte + pairs []pair +} + +func (arr *byKey) Len() int { + return len(arr.pairs) +} +func (arr *byKey) Less(i, j int) bool { + key1 := arr.json[arr.pairs[i].kstart+1 : arr.pairs[i].kend-1] + key2 := arr.json[arr.pairs[j].kstart+1 : arr.pairs[j].kend-1] + return string(key1) < string(key2) +} +func (arr *byKey) Swap(i, j int) { + arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i] + arr.sorted = true +} + +func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) { + var ok bool + if width > 0 { + if pretty && open == '[' && max == -1 { + // here we try to create a single line array + max := width - (len(buf) - nl) + if max > 3 { + s1, s2 := len(buf), i + buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max) + if ok && len(buf)-s1 <= max { + return buf, i, nl, true + } + buf = buf[:s1] + i = s2 + } + } else if max != -1 && open == '{' { + return buf, i, nl, false + } + } + buf = append(buf, open) + i++ + var pairs []pair + if open == '{' && sortkeys { + pairs = make([]pair, 0, 8) + } + var n int + for ; i < len(json); i++ { + if json[i] <= ' ' { + continue + } + if json[i] == close { + if pretty { + if open == '{' && sortkeys { + buf = sortPairs(json, buf, pairs) + } + if n > 0 { + nl = len(buf) + buf = append(buf, '\n') + } + if buf[len(buf)-1] != open { + buf = appendTabs(buf, prefix, indent, tabs) + } + } + buf = append(buf, close) + return buf, i + 1, nl, open != '{' + } + if open == '[' || json[i] == '"' { + if n > 0 { + buf = append(buf, ',') + if width != -1 && open == '[' { + buf = append(buf, ' ') + } + } + var p pair + if pretty { + nl = len(buf) + buf = append(buf, '\n') + if open == '{' && sortkeys { + p.kstart = i + p.vstart = len(buf) + } + buf = appendTabs(buf, prefix, indent, tabs+1) + } + if open == '{' { + buf, i, nl, _ = appendPrettyString(buf, json, i, nl) + if sortkeys { + p.kend = i + } + buf = append(buf, ':') + if pretty { + buf = 
append(buf, ' ') + } + } + buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max) + if max != -1 && !ok { + return buf, i, nl, false + } + if pretty && open == '{' && sortkeys { + p.vend = len(buf) + if p.kstart > p.kend || p.vstart > p.vend { + // bad data. disable sorting + sortkeys = false + } else { + pairs = append(pairs, p) + } + } + i-- + n++ + } + } + return buf, i, nl, open != '{' +} +func sortPairs(json, buf []byte, pairs []pair) []byte { + if len(pairs) == 0 { + return buf + } + vstart := pairs[0].vstart + vend := pairs[len(pairs)-1].vend + arr := byKey{false, json, pairs} + sort.Sort(&arr) + if !arr.sorted { + return buf + } + nbuf := make([]byte, 0, vend-vstart) + for i, p := range pairs { + nbuf = append(nbuf, buf[p.vstart:p.vend]...) + if i < len(pairs)-1 { + nbuf = append(nbuf, ',') + nbuf = append(nbuf, '\n') + } + } + return append(buf[:vstart], nbuf...) +} + +func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] == '"' { + var sc int + for j := i - 1; j > s; j-- { + if json[j] == '\\' { + sc++ + } else { + break + } + } + if sc%2 == 1 { + continue + } + i++ + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) { + s := i + i++ + for ; i < len(json); i++ { + if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' { + break + } + } + return append(buf, json[s:i]...), i, nl, true +} + +func appendTabs(buf []byte, prefix, indent string, tabs int) []byte { + if len(prefix) != 0 { + buf = append(buf, prefix...) + } + if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' { + for i := 0; i < tabs; i++ { + buf = append(buf, ' ', ' ') + } + } else { + for i := 0; i < tabs; i++ { + buf = append(buf, indent...) + } + } + return buf +} + +// Style is the color style +type Style struct { + Key, String, Number [2]string + True, False, Null [2]string + Append func(dst []byte, c byte) []byte +} + +func hexp(p byte) byte { + switch { + case p < 10: + return p + '0' + default: + return (p - 10) + 'a' + } +} + +// TerminalStyle is for terminals +var TerminalStyle = &Style{ + Key: [2]string{"\x1B[94m", "\x1B[0m"}, + String: [2]string{"\x1B[92m", "\x1B[0m"}, + Number: [2]string{"\x1B[93m", "\x1B[0m"}, + True: [2]string{"\x1B[96m", "\x1B[0m"}, + False: [2]string{"\x1B[96m", "\x1B[0m"}, + Null: [2]string{"\x1B[91m", "\x1B[0m"}, + Append: func(dst []byte, c byte) []byte { + if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') { + dst = append(dst, "\\u00"...) + dst = append(dst, hexp((c>>4)&0xF)) + return append(dst, hexp((c)&0xF)) + } + return append(dst, c) + }, +} + +// Color will colorize the json. The style parma is used for customizing +// the colors. Passing nil to the style param will use the default +// TerminalStyle. +func Color(src []byte, style *Style) []byte { + if style == nil { + style = TerminalStyle + } + apnd := style.Append + if apnd == nil { + apnd = func(dst []byte, c byte) []byte { + return append(dst, c) + } + } + type stackt struct { + kind byte + key bool + } + var dst []byte + var stack []stackt + for i := 0; i < len(src); i++ { + if src[i] == '"' { + key := len(stack) > 0 && stack[len(stack)-1].key + if key { + dst = append(dst, style.Key[0]...) + } else { + dst = append(dst, style.String[0]...) 
+ } + dst = apnd(dst, '"') + for i = i + 1; i < len(src); i++ { + dst = apnd(dst, src[i]) + if src[i] == '"' { + j := i - 1 + for ; ; j-- { + if src[j] != '\\' { + break + } + } + if (j-i)%2 != 0 { + break + } + } + } + if key { + dst = append(dst, style.Key[1]...) + } else { + dst = append(dst, style.String[1]...) + } + } else if src[i] == '{' || src[i] == '[' { + stack = append(stack, stackt{src[i], src[i] == '{'}) + dst = apnd(dst, src[i]) + } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 { + stack = stack[:len(stack)-1] + dst = apnd(dst, src[i]) + } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' { + stack[len(stack)-1].key = !stack[len(stack)-1].key + dst = apnd(dst, src[i]) + } else { + var kind byte + if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' { + kind = '0' + dst = append(dst, style.Number[0]...) + } else if src[i] == 't' { + kind = 't' + dst = append(dst, style.True[0]...) + } else if src[i] == 'f' { + kind = 'f' + dst = append(dst, style.False[0]...) + } else if src[i] == 'n' { + kind = 'n' + dst = append(dst, style.Null[0]...) + } else { + dst = apnd(dst, src[i]) + } + if kind != 0 { + for ; i < len(src); i++ { + if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' { + i-- + break + } + dst = apnd(dst, src[i]) + } + if kind == '0' { + dst = append(dst, style.Number[1]...) + } else if kind == 't' { + dst = append(dst, style.True[1]...) + } else if kind == 'f' { + dst = append(dst, style.False[1]...) + } else if kind == 'n' { + dst = append(dst, style.Null[1]...) + } + } + } + } + return dst +} diff --git a/vendor/github.com/tidwall/rtree/.travis.yml b/vendor/github.com/tidwall/rtree/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/tidwall/rtree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/tidwall/rtree/LICENSE b/vendor/github.com/tidwall/rtree/LICENSE new file mode 100644 index 0000000..1a6cb67 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Josh Baker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
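Before the rtree sources, a brief illustrative sketch (not part of the diff) of the `pretty.Color` API defined above; the sample JSON is made up:

```go
package main

import (
	"os"

	"github.com/tidwall/pretty"
)

func main() {
	json := []byte(`{"name":"Tom","age":37,"admin":false}`)

	// Pretty-print first, then colorize; passing nil for the style
	// selects the package's default TerminalStyle.
	out := pretty.Color(pretty.Pretty(json), nil)
	os.Stdout.Write(out)
}
```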
diff --git a/vendor/github.com/tidwall/rtree/README.md b/vendor/github.com/tidwall/rtree/README.md new file mode 100644 index 0000000..53a845d --- /dev/null +++ b/vendor/github.com/tidwall/rtree/README.md @@ -0,0 +1,22 @@
+RTree implementation for Go
+===========================
+
+[![Build Status](https://travis-ci.org/tidwall/rtree.svg?branch=master)](https://travis-ci.org/tidwall/rtree)
+[![GoDoc](https://godoc.org/github.com/tidwall/rtree?status.svg)](https://godoc.org/github.com/tidwall/rtree)
+
+This package provides an in-memory R-Tree implementation for Go, useful as a spatial data structure.
+It supports 1 to 20 dimensions and can store and search multiple dimensions interchangeably in the same tree.
+
+Authors
+-------
+* 1983 Original algorithm and test code by Antonin Guttman and Michael Stonebraker, UC Berkeley
+* 1994 ANSI C port of the original test code by Melinda Green
+* 1995 Sphere volume fix for degeneracy problem submitted by Paul Brook
+* 2004 Templated C++ port by Greg Douglas
+* 2016 Go port by Josh Baker
+* 2018 Added kNN and merged in some of the RBush logic by Vladimir Agafonkin
+
+License
+-------
+RTree source code is available under the MIT License.
+
diff --git a/vendor/github.com/tidwall/rtree/base/knn.go b/vendor/github.com/tidwall/rtree/base/knn.go new file mode 100644 index 0000000..6b26df3 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/knn.go @@ -0,0 +1,98 @@
+package base
+
+import (
+	"github.com/tidwall/tinyqueue"
+)
+
+type queueItem struct {
+	node   *treeNode
+	isItem bool
+	dist   float64
+}
+
+func (item *queueItem) Less(b tinyqueue.Item) bool {
+	return item.dist < b.(*queueItem).dist
+}
+
+// KNN returns items nearest to farthest. The dist param is the "box distance".
+func (tr *RTree) KNN(min, max []float64, center bool, iter func(item interface{}, dist float64) bool) bool {
+	var isBox bool
+	knnPoint := make([]float64, tr.dims)
+
+	bbox := &treeNode{min: min, max: max}
+
+	for i := 0; i < tr.dims; i++ {
+		knnPoint[i] = (bbox.min[i] + bbox.max[i]) / 2
+		if !isBox && bbox.min[i] != bbox.max[i] {
+			isBox = true
+		}
+	}
+	node := tr.data
+	queue := tinyqueue.New(nil)
+	for node != nil {
+		for i := 0; i < node.count; i++ {
+			child := node.children[i]
+			var dist float64
+			if isBox {
+				dist = boxDistRect(bbox, child)
+			} else {
+				dist = boxDistPoint(knnPoint, child)
+			}
+			queue.Push(&queueItem{node: child, isItem: node.leaf, dist: dist})
+		}
+		for queue.Len() > 0 && queue.Peek().(*queueItem).isItem {
+			item := queue.Pop().(*queueItem)
+			if !iter(item.node.unsafeItem().item, item.dist) {
+				return false
+			}
+		}
+		last := queue.Pop()
+		if last != nil {
+			node = (*treeNode)(last.(*queueItem).node)
+		} else {
+			node = nil
+		}
+	}
+	return true
+}
+
+func boxDistRect(a, b *treeNode) float64 {
+	var dist float64
+	for i := 0; i < len(a.min); i++ {
+		var min, max float64
+		if a.min[i] > b.min[i] {
+			min = a.min[i]
+		} else {
+			min = b.min[i]
+		}
+		if a.max[i] < b.max[i] {
+			max = a.max[i]
+		} else {
+			max = b.max[i]
+		}
+		squared := min - max
+		if squared > 0 {
+			dist += squared * squared
+		}
+	}
+	return dist
+}
+
+func boxDistPoint(point []float64, childBox *treeNode) float64 {
+	var dist float64
+	for i := 0; i < len(point); i++ {
+		d := axisDist(point[i], childBox.min[i], childBox.max[i])
+		dist += d * d
+	}
+	return dist
+}
+
+func axisDist(k, min, max float64) float64 {
+	if k < min {
+		return min - k
+	}
+	if k <= max {
+		return 0
+	}
+	return k - max
+}
diff --git a/vendor/github.com/tidwall/rtree/base/load.go b/vendor/github.com/tidwall/rtree/base/load.go new file mode 100644 index 0000000..bf6954f --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/load.go @@ -0,0 +1,97 @@
+package base
+
+import "math"
+
+// Load bulk-loads items into the R-tree.
+func (tr *RTree) Load(mins, maxs [][]float64, items []interface{}) {
+	if len(items) < tr.minEntries {
+		for i := 0; i < len(items); i++ {
+			tr.Insert(mins[i], maxs[i], items[i])
+		}
+		return
+	}
+
+	// prefill the items
+	fitems := make([]*treeNode, len(items))
+	for i := 0; i < len(items); i++ {
+		item := &treeItem{min: mins[i], max: maxs[i], item: items[i]}
+		fitems[i] = item.unsafeNode()
+	}
+
+	// the following equations are defined in the paper describing OMT
+	N := len(fitems)
+	M := tr.maxEntries
+	h := int(math.Ceil(math.Log(float64(N)) / math.Log(float64(M))))
+	Nsubtree := int(math.Pow(float64(M), float64(h-1)))
+	S := int(math.Ceil(math.Sqrt(float64(N) / float64(Nsubtree))))
+
+	// sort by the initial axis
+	axis := 0
+	sortByAxis(fitems, axis)
+
+	// build the root node. it's split differently from the subtrees.
+	children := make([]*treeNode, 0, S)
+	for i := 0; i < S; i++ {
+		var part []*treeNode
+		if i == S-1 {
+			// last split
+			part = fitems[len(fitems)/S*i:]
+		} else {
+			part = fitems[len(fitems)/S*i : len(fitems)/S*(i+1)]
+		}
+		children = append(children, tr.omt(part, h-1, axis+1))
+	}
+
+	node := tr.createNode(children)
+	node.leaf = false
+	node.height = h
+	tr.calcBBox(node)
+
+	if tr.data.count == 0 {
+		// save as is if tree is empty
+		tr.data = node
+	} else if tr.data.height == node.height {
+		// split root if trees have the same height
+		tr.splitRoot(tr.data, node)
+	} else {
+		if tr.data.height < node.height {
+			// swap trees if inserted one is bigger
+			tr.data, node = node, tr.data
+		}
+
+		// insert the small tree into the large tree at appropriate level
+		tr.insert(node, nil, tr.data.height-node.height-1, true)
+	}
+}
+
+func (tr *RTree) omt(fitems []*treeNode, h, axis int) *treeNode {
+	if len(fitems) <= tr.maxEntries {
+		// reached leaf level; return leaf
+		children := make([]*treeNode, len(fitems))
+		copy(children, fitems)
+		node := tr.createNode(children)
+		node.height = h
+		tr.calcBBox(node)
+		return node
+	}
+
+	// sort the items on a different axis than the previous level.
+ sortByAxis(fitems, axis%tr.dims) + children := make([]*treeNode, 0, tr.maxEntries) + partsz := len(fitems) / tr.maxEntries + for i := 0; i < tr.maxEntries; i++ { + var part []*treeNode + if i == tr.maxEntries-1 { + // last part + part = fitems[partsz*i:] + } else { + part = fitems[partsz*i : partsz*(i+1)] + } + children = append(children, tr.omt(part, h-1, axis+1)) + } + node := tr.createNode(children) + node.height = h + node.leaf = false + tr.calcBBox(node) + return node +} diff --git a/vendor/github.com/tidwall/rtree/base/rtree.go b/vendor/github.com/tidwall/rtree/base/rtree.go new file mode 100644 index 0000000..4a66235 --- /dev/null +++ b/vendor/github.com/tidwall/rtree/base/rtree.go @@ -0,0 +1,673 @@ +package base + +import ( + "math" + "unsafe" +) + +// precalculate infinity +var mathInfNeg = math.Inf(-1) +var mathInfPos = math.Inf(+1) + +type treeNode struct { + min, max []float64 + children []*treeNode + count int + height int + leaf bool +} + +func (node *treeNode) unsafeItem() *treeItem { + return (*treeItem)(unsafe.Pointer(node)) +} + +func (tr *RTree) createNode(children []*treeNode) *treeNode { + n := &treeNode{ + height: 1, + leaf: true, + children: make([]*treeNode, tr.maxEntries+1), + } + if len(children) > 0 { + n.count = len(children) + copy(n.children[:n.count], children) + } + n.min = make([]float64, tr.dims) + n.max = make([]float64, tr.dims) + for i := 0; i < tr.dims; i++ { + n.min[i] = mathInfPos + n.max[i] = mathInfNeg + } + return n +} + +func (node *treeNode) extend(b *treeNode) { + for i := 0; i < len(node.min); i++ { + if b.min[i] < node.min[i] { + node.min[i] = b.min[i] + } + if b.max[i] > node.max[i] { + node.max[i] = b.max[i] + } + } +} + +func (node *treeNode) area() float64 { + area := node.max[0] - node.min[0] + for i := 1; i < len(node.min); i++ { + area *= node.max[i] - node.min[i] + } + return area +} + +func (node *treeNode) enlargedAreaAxis(b *treeNode, axis int) float64 { + var max, min float64 + if b.max[axis] > node.max[axis] { + max = b.max[axis] + } else { + max = node.max[axis] + } + if b.min[axis] < node.min[axis] { + min = b.min[axis] + } else { + min = node.min[axis] + } + return max - min +} + +func (node *treeNode) enlargedArea(b *treeNode) float64 { + area := node.enlargedAreaAxis(b, 0) + for i := 1; i < len(node.min); i++ { + area *= node.enlargedAreaAxis(b, i) + } + return area +} + +func (node *treeNode) intersectionAreaAxis(b *treeNode, axis int) float64 { + var max, min float64 + if node.max[axis] < b.max[axis] { + max = node.max[axis] + } else { + max = b.max[axis] + } + if node.min[axis] > b.min[axis] { + min = node.min[axis] + } else { + min = b.min[axis] + } + if max > min { + return max - min + } + return 0 +} +func (node *treeNode) intersectionArea(b *treeNode) float64 { + area := node.intersectionAreaAxis(b, 0) + for i := 1; i < len(node.min); i++ { + area *= node.intersectionAreaAxis(b, i) + } + return area +} +func (node *treeNode) margin() float64 { + margin := node.max[0] - node.min[0] + for i := 1; i < len(node.min); i++ { + margin += node.max[i] - node.min[i] + } + return margin +} + +type result int + +const ( + not result = 0 + intersects result = 1 + contains result = 2 +) + +func (node *treeNode) overlaps(b *treeNode) result { + for i := 0; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return not + } + if node.min[i] > b.min[i] || b.max[i] > node.max[i] { + i++ + for ; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return not + } + } + return 
intersects + } + } + return contains +} + +func (node *treeNode) intersects(b *treeNode) bool { + for i := 0; i < len(node.min); i++ { + if b.min[i] > node.max[i] || b.max[i] < node.min[i] { + return false + } + } + return true +} + +func (node *treeNode) findItem(item interface{}) int { + for i := 0; i < node.count; i++ { + if node.children[i].unsafeItem().item == item { + return i + } + } + return -1 +} + +func (node *treeNode) contains(b *treeNode) bool { + for i := 0; i < len(node.min); i++ { + if node.min[i] > b.min[i] || b.max[i] > node.max[i] { + return false + } + } + return true +} + +func (node *treeNode) childCount() int { + if node.leaf { + return node.count + } + var n int + for i := 0; i < node.count; i++ { + n += node.children[i].childCount() + } + return n +} + +type treeItem struct { + min, max []float64 + item interface{} +} + +func (item *treeItem) unsafeNode() *treeNode { + return (*treeNode)(unsafe.Pointer(item)) +} + +// RTree is an R-tree +type RTree struct { + dims int + maxEntries int + minEntries int + data *treeNode // root node + // resusable fields, these help performance of common mutable operations. + reuse struct { + path []*treeNode // for reinsertion path + indexes []int // for remove function + stack []int // for bulk loading + } +} + +// New creates a new R-tree +func New(dims, maxEntries int) *RTree { + if dims <= 0 { + panic("invalid dimensions") + } + + tr := &RTree{} + tr.dims = dims + tr.maxEntries = int(math.Max(4, float64(maxEntries))) + tr.minEntries = int(math.Max(2, math.Ceil(float64(tr.maxEntries)*0.4))) + tr.data = tr.createNode(nil) + return tr +} + +// Insert inserts an item +func (tr *RTree) Insert(min, max []float64, item interface{}) { + if len(min) != tr.dims || len(max) != tr.dims { + panic("invalid dimensions") + } + if item == nil { + panic("nil item") + } + bbox := treeNode{min: min, max: max} + tr.insert(&bbox, item, tr.data.height-1, false) +} + +func (tr *RTree) insert(bbox *treeNode, item interface{}, level int, isNode bool) { + tr.reuse.path = tr.reuse.path[:0] + node, insertPath := tr.chooseSubtree(bbox, tr.data, level, tr.reuse.path) + if item == nil { + // item is only nil when bulk loading a node + if node.leaf { + panic("loading node into leaf") + } + node.children[node.count] = bbox + node.count++ + } else { + ti := &treeItem{min: bbox.min, max: bbox.max, item: item} + node.children[node.count] = ti.unsafeNode() + node.count++ + } + node.extend(bbox) + for level >= 0 { + if insertPath[level].count > tr.maxEntries { + insertPath = tr.split(insertPath, level) + level-- + } else { + break + } + } + tr.adjustParentBBoxes(bbox, insertPath, level) + tr.reuse.path = insertPath +} + +func (tr *RTree) adjustParentBBoxes(bbox *treeNode, path []*treeNode, level int) { + // adjust bboxes along the given tree path + for i := level; i >= 0; i-- { + path[i].extend(bbox) + } +} + +func (tr *RTree) chooseSubtree(bbox, node *treeNode, level int, path []*treeNode) (*treeNode, []*treeNode) { + var targetNode *treeNode + var area, enlargement, minArea, minEnlargement float64 + for { + path = append(path, node) + if node.leaf || len(path)-1 == level { + break + } + minEnlargement = mathInfPos + minArea = minEnlargement + for i := 0; i < node.count; i++ { + child := node.children[i] + area = child.area() + enlargement = bbox.enlargedArea(child) - area + if enlargement < minEnlargement { + minEnlargement = enlargement + if area < minArea { + minArea = area + } + targetNode = child + } else if enlargement == minEnlargement { + if area < minArea { 
+ minArea = area + targetNode = child + } + } + } + if targetNode != nil { + node = targetNode + } else if node.count > 0 { + node = (*treeNode)(node.children[0]) + } else { + node = nil + } + } + return node, path +} +func (tr *RTree) split(insertPath []*treeNode, level int) []*treeNode { + var node = insertPath[level] + var M = node.count + var m = tr.minEntries + + tr.chooseSplitAxis(node, m, M) + splitIndex := tr.chooseSplitIndex(node, m, M) + + spliced := make([]*treeNode, node.count-splitIndex) + copy(spliced, node.children[splitIndex:]) + node.count = splitIndex + + newNode := tr.createNode(spliced) + newNode.height = node.height + newNode.leaf = node.leaf + + tr.calcBBox(node) + tr.calcBBox(newNode) + + if level != 0 { + insertPath[level-1].children[insertPath[level-1].count] = newNode + insertPath[level-1].count++ + } else { + tr.splitRoot(node, newNode) + } + return insertPath +} +func (tr *RTree) chooseSplitIndex(node *treeNode, m, M int) int { + var i int + var bbox1, bbox2 *treeNode + var overlap, area, minOverlap, minArea float64 + var index int + + minArea = mathInfPos + minOverlap = minArea + + for i = m; i <= M-m; i++ { + bbox1 = tr.distBBox(node, 0, i, nil) + bbox2 = tr.distBBox(node, i, M, nil) + + overlap = bbox1.intersectionArea(bbox2) + area = bbox1.area() + bbox2.area() + + // choose distribution with minimum overlap + if overlap < minOverlap { + minOverlap = overlap + index = i + + if area < minArea { + minArea = area + } + } else if overlap == minOverlap { + // otherwise choose distribution with minimum area + if area < minArea { + minArea = area + index = i + } + } + } + return index +} +func (tr *RTree) calcBBox(node *treeNode) { + tr.distBBox(node, 0, node.count, node) +} +func (tr *RTree) chooseSplitAxis(node *treeNode, m, M int) { + minMargin := tr.allDistMargin(node, m, M, 0) + var minAxis int + for axis := 1; axis < tr.dims; axis++ { + margin := tr.allDistMargin(node, m, M, axis) + if margin < minMargin { + minMargin = margin + minAxis = axis + } + } + if minAxis < tr.dims { + tr.sortNodes(node, minAxis) + } +} +func (tr *RTree) splitRoot(node, newNode *treeNode) { + tr.data = tr.createNode([]*treeNode{node, newNode}) + tr.data.height = node.height + 1 + tr.data.leaf = false + tr.calcBBox(tr.data) +} +func (tr *RTree) distBBox(node *treeNode, k, p int, destNode *treeNode) *treeNode { + if destNode == nil { + destNode = tr.createNode(nil) + } else { + for i := 0; i < tr.dims; i++ { + destNode.min[i] = mathInfPos + destNode.max[i] = mathInfNeg + } + } + for i := k; i < p; i++ { + if node.leaf { + destNode.extend(node.children[i]) + } else { + destNode.extend((*treeNode)(node.children[i])) + } + } + return destNode +} +func (tr *RTree) allDistMargin(node *treeNode, m, M int, axis int) float64 { + tr.sortNodes(node, axis) + + var leftBBox = tr.distBBox(node, 0, m, nil) + var rightBBox = tr.distBBox(node, M-m, M, nil) + var margin = leftBBox.margin() + rightBBox.margin() + + var i int + + if node.leaf { + for i = m; i < M-m; i++ { + leftBBox.extend(node.children[i]) + margin += leftBBox.margin() + } + for i = M - m - 1; i >= m; i-- { + leftBBox.extend(node.children[i]) + margin += rightBBox.margin() + } + } else { + for i = m; i < M-m; i++ { + child := (*treeNode)(node.children[i]) + leftBBox.extend(child) + margin += leftBBox.margin() + } + for i = M - m - 1; i >= m; i-- { + child := (*treeNode)(node.children[i]) + leftBBox.extend(child) + margin += rightBBox.margin() + } + } + return margin +} +func (tr *RTree) sortNodes(node *treeNode, axis int) { + 
sortByAxis(node.children[:node.count], axis) +} + +func sortByAxis(items []*treeNode, axis int) { + if len(items) < 2 { + return + } + left, right := 0, len(items)-1 + pivotIndex := len(items) / 2 + items[pivotIndex], items[right] = items[right], items[pivotIndex] + for i := range items { + if items[i].min[axis] < items[right].min[axis] { + items[i], items[left] = items[left], items[i] + left++ + } + } + items[left], items[right] = items[right], items[left] + sortByAxis(items[:left], axis) + sortByAxis(items[left+1:], axis) +} + +// Search searches the tree for items in the input rectangle +func (tr *RTree) Search(min, max []float64, iter func(item interface{}) bool) bool { + bbox := &treeNode{min: min, max: max} + if !tr.data.intersects(bbox) { + return true + } + return tr.search(tr.data, bbox, iter) +} + +func (tr *RTree) search(node, bbox *treeNode, iter func(item interface{}) bool) bool { + if node.leaf { + for i := 0; i < node.count; i++ { + if bbox.intersects(node.children[i]) { + if !iter(node.children[i].unsafeItem().item) { + return false + } + } + } + } else { + for i := 0; i < node.count; i++ { + r := bbox.overlaps(node.children[i]) + if r == intersects { + if !tr.search(node.children[i], bbox, iter) { + return false + } + } else if r == contains { + if !scan(node.children[i], iter) { + return false + } + } + } + } + return true +} + +func (tr *RTree) IsEmpty() bool { + empty := true + tr.Scan(func(item interface{}) bool { + empty = false + return false + }) + return empty +} + +// Remove removes an item from the R-tree. +func (tr *RTree) Remove(min, max []float64, item interface{}) { + bbox := &treeNode{min: min, max: max} + tr.remove(bbox, item) +} + +func (tr *RTree) remove(bbox *treeNode, item interface{}) { + path := tr.reuse.path[:0] + indexes := tr.reuse.indexes[:0] + + var node = tr.data + var i int + var parent *treeNode + var index int + var goingUp bool + + for node != nil || len(path) != 0 { + if node == nil { + node = path[len(path)-1] + path = path[:len(path)-1] + if len(path) == 0 { + parent = nil + } else { + parent = path[len(path)-1] + } + i = indexes[len(indexes)-1] + indexes = indexes[:len(indexes)-1] + goingUp = true + } + + if node.leaf { + index = node.findItem(item) + if index != -1 { + // item found, remove the item and condense tree upwards + copy(node.children[index:], node.children[index+1:]) + node.children[node.count-1] = nil + node.count-- + path = append(path, node) + tr.condense(path) + goto done + } + } + if !goingUp && !node.leaf && node.contains(bbox) { // go down + path = append(path, node) + indexes = append(indexes, i) + i = 0 + parent = node + node = (*treeNode)(node.children[0]) + } else if parent != nil { // go right + i++ + if i == parent.count { + node = nil + } else { + node = (*treeNode)(parent.children[i]) + } + goingUp = false + } else { + node = nil + } + } +done: + tr.reuse.path = path + tr.reuse.indexes = indexes + return +} +func (tr *RTree) condense(path []*treeNode) { + // go through the path, removing empty nodes and updating bboxes + var siblings []*treeNode + for i := len(path) - 1; i >= 0; i-- { + if path[i].count == 0 { + if i > 0 { + siblings = path[i-1].children[:path[i-1].count] + index := -1 + for j := 0; j < len(siblings); j++ { + if siblings[j] == path[i] { + index = j + break + } + } + copy(siblings[index:], siblings[index+1:]) + siblings[len(siblings)-1] = nil + path[i-1].count-- + //siblings = siblings[:len(siblings)-1] + //path[i-1].children = siblings + } else { + tr.data = tr.createNode(nil) // clear tree + 
} + } else { + tr.calcBBox(path[i]) + } + } +} + +// Count returns the number of items in the R-tree. +func (tr *RTree) Count() int { + return tr.data.childCount() +} + +// Traverse iterates over the entire R-tree and includes all nodes and items. +func (tr *RTree) Traverse(iter func(min, max []float64, level int, item interface{}) bool) bool { + return tr.traverse(tr.data, iter) +} + +func (tr *RTree) traverse(node *treeNode, iter func(min, max []float64, level int, item interface{}) bool) bool { + if !iter(node.min, node.max, int(node.height), nil) { + return false + } + if node.leaf { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !iter(child.min, child.max, 0, child.unsafeItem().item) { + return false + } + } + } else { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !tr.traverse(child, iter) { + return false + } + } + } + return true +} + +// Scan iterates over the entire R-tree +func (tr *RTree) Scan(iter func(item interface{}) bool) bool { + return scan(tr.data, iter) +} + +func scan(node *treeNode, iter func(item interface{}) bool) bool { + if node.leaf { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !iter(child.unsafeItem().item) { + return false + } + } + } else { + for i := 0; i < node.count; i++ { + child := node.children[i] + if !scan(child, iter) { + return false + } + } + } + return true +} + +// Bounds returns the bounding box of the entire R-tree +func (tr *RTree) Bounds() (min, max []float64) { + if tr.data.count > 0 { + return tr.data.min, tr.data.max + } + return make([]float64, tr.dims), make([]float64, tr.dims) +} + +// Complexity returns the complexity of the R-tree. The higher the value, the +// more complex the tree. The value of 1 is the lowest. +func (tr *RTree) Complexity() float64 { + var nodeCount int + var itemCount int + tr.Traverse(func(_, _ []float64, level int, _ interface{}) bool { + if level == 0 { + itemCount++ + } else { + nodeCount++ + } + return true + }) + return float64(tr.maxEntries*nodeCount) / float64(itemCount) +} diff --git a/vendor/github.com/tidwall/rtree/rtree.go b/vendor/github.com/tidwall/rtree/rtree.go new file mode 100644 index 0000000..bbbb1ff --- /dev/null +++ b/vendor/github.com/tidwall/rtree/rtree.go @@ -0,0 +1,278 @@ +package rtree + +import ( + "math" + "sync" + + "github.com/tidwall/rtree/base" +) + +type Iterator func(item Item) bool +type Item interface { + Rect(ctx interface{}) (min []float64, max []float64) +} + +type RTree struct { + dims int + maxEntries int + ctx interface{} + trs []*base.RTree + used int +} + +func New(ctx interface{}) *RTree { + tr := &RTree{ + ctx: ctx, + dims: 20, + maxEntries: 13, + } + tr.trs = make([]*base.RTree, 20) + return tr +} + +func (tr *RTree) Insert(item Item) { + if item == nil { + panic("nil item") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + btr := tr.trs[len(min)-1] + if btr == nil { + btr = base.New(len(min), tr.maxEntries) + tr.trs[len(min)-1] = btr + tr.used++ + } + amin := make([]float64, len(min)) + amax := make([]float64, len(max)) + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + btr.Insert(amin, amax, item) +} + +func (tr *RTree) Remove(item Item) { + if item == nil { + panic("nil item") + } + min, max := item.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item 
rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + btr := tr.trs[len(min)-1] + if btr == nil { + return + } + amin := make([]float64, len(min)) + amax := make([]float64, len(max)) + for i := 0; i < len(min); i++ { + amin[i], amax[i] = min[i], max[i] + } + btr.Remove(amin, amax, item) + if btr.IsEmpty() { + tr.trs[len(min)-1] = nil + tr.used-- + } +} +func (tr *RTree) Reset() { + for i := 0; i < len(tr.trs); i++ { + tr.trs[i] = nil + } + tr.used = 0 +} +func (tr *RTree) Count() int { + var count int + for _, btr := range tr.trs { + if btr != nil { + count += btr.Count() + } + } + return count +} + +func (tr *RTree) Search(bounds Item, iter Iterator) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + used := tr.used + for i, btr := range tr.trs { + if used == 0 { + break + } + if btr != nil { + if !search(btr, min, max, i+1, iter) { + return + } + used-- + } + } +} +func search(btr *base.RTree, min, max []float64, dims int, iter Iterator) bool { + amin := make([]float64, dims) + amax := make([]float64, dims) + for i := 0; i < dims; i++ { + if i < len(min) { + amin[i] = min[i] + amax[i] = max[i] + } else { + amin[i] = math.Inf(-1) + amax[i] = math.Inf(+1) + } + } + var ended bool + btr.Search(amin, amax, func(item interface{}) bool { + if !iter(item.(Item)) { + ended = true + return false + } + return true + }) + return !ended +} + +func (tr *RTree) KNN(bounds Item, center bool, iter func(item Item, dist float64) bool) { + if bounds == nil { + panic("nil bounds being used for search") + } + min, max := bounds.Rect(tr.ctx) + if len(min) != len(max) { + return // just return + panic("invalid item rectangle") + } + if len(min) < 1 || len(min) > len(tr.trs) { + return // just return + panic("invalid dimension") + } + + if tr.used == 0 { + return + } + if tr.used == 1 { + for i, btr := range tr.trs { + if btr != nil { + knn(btr, min, max, center, i+1, func(item interface{}, dist float64) bool { + return iter(item.(Item), dist) + }) + break + } + } + return + } + + type queueT struct { + done bool + step int + item Item + dist float64 + } + + var mu sync.Mutex + var ended bool + queues := make(map[int][]queueT) + cond := sync.NewCond(&mu) + for i, btr := range tr.trs { + if btr != nil { + dims := i + 1 + mu.Lock() + queues[dims] = []queueT{} + cond.Signal() + mu.Unlock() + go func(dims int, btr *base.RTree) { + knn(btr, min, max, center, dims, func(item interface{}, dist float64) bool { + mu.Lock() + if ended { + mu.Unlock() + return false + } + queues[dims] = append(queues[dims], queueT{item: item.(Item), dist: dist}) + cond.Signal() + mu.Unlock() + return true + }) + mu.Lock() + queues[dims] = append(queues[dims], queueT{done: true}) + cond.Signal() + mu.Unlock() + }(dims, btr) + } + } + mu.Lock() + for { + ready := true + for i := range queues { + if len(queues[i]) == 0 { + ready = false + break + } + if queues[i][0].done { + delete(queues, i) + } + } + if len(queues) == 0 { + break + } + if ready { + var j int + var minDist float64 + var minItem Item + var minQueue int + for i := range queues { + if j == 0 || queues[i][0].dist < minDist { + minDist = queues[i][0].dist + minItem = queues[i][0].item + minQueue = i + } + } + queues[minQueue] = queues[minQueue][1:] + if !iter(minItem, 
minDist) {
+				ended = true
+				break
+			}
+			continue
+		}
+		cond.Wait()
+	}
+	mu.Unlock()
+}
+func knn(btr *base.RTree, min, max []float64, center bool, dims int, iter func(item interface{}, dist float64) bool) bool {
+	amin := make([]float64, dims)
+	amax := make([]float64, dims)
+	for i := 0; i < dims; i++ {
+		if i < len(min) {
+			amin[i] = min[i]
+			amax[i] = max[i]
+		} else {
+			amin[i] = math.Inf(-1)
+			amax[i] = math.Inf(+1)
+		}
+	}
+	var ended bool
+	btr.KNN(amin, amax, center, func(item interface{}, dist float64) bool {
+		if !iter(item.(Item), dist) {
+			ended = true
+			return false
+		}
+		return true
+	})
+	return !ended
+}
diff --git a/vendor/github.com/tidwall/tinyqueue/LICENSE b/vendor/github.com/tidwall/tinyqueue/LICENSE new file mode 100644 index 0000000..2b7cd9d --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/LICENSE @@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2017, Vladimir Agafonkin
+
+Permission to use, copy, modify, and/or distribute this software for any purpose
+with or without fee is hereby granted, provided that the above copyright notice
+and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
+THIS SOFTWARE.
diff --git a/vendor/github.com/tidwall/tinyqueue/README.md b/vendor/github.com/tidwall/tinyqueue/README.md new file mode 100644 index 0000000..f4edc91 --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/README.md @@ -0,0 +1,7 @@
+# tinyqueue
+
+tinyqueue is a Go package for binary heap priority queues.
+Ported from the [tinyqueue](https://github.com/mourner/tinyqueue) JavaScript library.
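A minimal sketch (not part of the diff) of the tinyqueue heap in use; the `num` adapter type is hypothetical, added here only to satisfy the `tinyqueue.Item` interface:

```go
package main

import (
	"fmt"

	"github.com/tidwall/tinyqueue"
)

// num adapts float64 to the tinyqueue.Item interface; the Less
// method makes this a min-heap, so smaller values pop first.
type num float64

func (a num) Less(b tinyqueue.Item) bool { return a < b.(num) }

func main() {
	q := tinyqueue.New(nil) // start with an empty heap
	for _, v := range []num{5, 1, 4, 2} {
		q.Push(v)
	}
	for q.Len() > 0 {
		fmt.Println(q.Pop()) // 1, 2, 4, 5
	}
}
```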
+ + diff --git a/vendor/github.com/tidwall/tinyqueue/tinyqueue.go b/vendor/github.com/tidwall/tinyqueue/tinyqueue.go new file mode 100644 index 0000000..4a06258 --- /dev/null +++ b/vendor/github.com/tidwall/tinyqueue/tinyqueue.go @@ -0,0 +1,86 @@ +package tinyqueue + +type Queue struct { + length int + data []Item +} + +type Item interface { + Less(Item) bool +} + +func New(data []Item) *Queue { + q := &Queue{} + q.data = data + q.length = len(data) + if q.length > 0 { + i := q.length >> 1 + for ; i >= 0; i-- { + q.down(i) + } + } + return q +} + +func (q *Queue) Push(item Item) { + q.data = append(q.data, item) + q.length++ + q.up(q.length - 1) +} +func (q *Queue) Pop() Item { + if q.length == 0 { + return nil + } + top := q.data[0] + q.length-- + if q.length > 0 { + q.data[0] = q.data[q.length] + q.down(0) + } + q.data = q.data[:len(q.data)-1] + return top +} +func (q *Queue) Peek() Item { + if q.length == 0 { + return nil + } + return q.data[0] +} +func (q *Queue) Len() int { + return q.length +} +func (q *Queue) down(pos int) { + data := q.data + halfLength := q.length >> 1 + item := data[pos] + for pos < halfLength { + left := (pos << 1) + 1 + right := left + 1 + best := data[left] + if right < q.length && data[right].Less(best) { + left = right + best = data[right] + } + if !best.Less(item) { + break + } + data[pos] = best + pos = left + } + data[pos] = item +} + +func (q *Queue) up(pos int) { + data := q.data + item := data[pos] + for pos > 0 { + parent := (pos - 1) >> 1 + current := data[parent] + if !item.Less(current) { + break + } + data[pos] = current + pos = parent + } + data[pos] = item +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b3e8fba..c07fdc1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,6 +4,23 @@ github.com/go-chi/chi github.com/go-yaml/yaml # github.com/sethvargo/go-password v0.1.2 github.com/sethvargo/go-password/password +# github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 +github.com/tidwall/btree +# github.com/tidwall/buntdb v1.1.0 +github.com/tidwall/buntdb +# github.com/tidwall/gjson v1.3.2 +github.com/tidwall/gjson +# github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb +github.com/tidwall/grect +# github.com/tidwall/match v1.0.1 +github.com/tidwall/match +# github.com/tidwall/pretty v1.0.0 +github.com/tidwall/pretty +# github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e +github.com/tidwall/rtree +github.com/tidwall/rtree/base +# github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 +github.com/tidwall/tinyqueue # golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish
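To round out the vendored packages, a small self-contained sketch (not part of the diff) driving the rtree wrapper; the `point` and `rect` types are illustrative implementations of the `Item` interface:

```go
package main

import (
	"fmt"

	"github.com/tidwall/rtree"
)

// point is a 2D item whose rectangle has zero area.
type point struct{ x, y float64 }

func (p *point) Rect(ctx interface{}) (min, max []float64) {
	return []float64{p.x, p.y}, []float64{p.x, p.y}
}

// rect is a query window implementing the same Item interface.
type rect struct{ min, max []float64 }

func (r *rect) Rect(ctx interface{}) (min, max []float64) {
	return r.min, r.max
}

func main() {
	tr := rtree.New(nil) // ctx is passed through to Item.Rect
	tr.Insert(&point{1, 1})
	tr.Insert(&point{3, 4})

	// Visit every item intersecting the window [0,0]..[2,2].
	window := &rect{min: []float64{0, 0}, max: []float64{2, 2}}
	tr.Search(window, func(item rtree.Item) bool {
		fmt.Println(*item.(*point)) // {1 1}
		return true
	})
}
```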