Browse Source

working but primitive

Eugene 5 years ago
parent
commit
3be483cb7a
84 changed files with 8853 additions and 14 deletions
  1. 19 0
      .travis.yml
  2. 0 1
      Dockerfile
  3. 51 1
      Gopkg.lock
  4. 2 2
      README.md
  5. 8 8
      app/main.go
  6. 1 2
      app/publisher/publisher.go
  7. 2 0
      app/rss/notify.go
  8. 22 0
      docker-compose.yml
  9. 8 0
      vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml
  10. 6 0
      vendor/github.com/ChimeraCoder/anaconda/.gitignore
  11. 36 0
      vendor/github.com/ChimeraCoder/anaconda/.travis.yml
  12. 1 0
      vendor/github.com/ChimeraCoder/anaconda/COPYING
  13. 39 0
      vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock
  14. 38 0
      vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml
  15. 7 0
      vendor/github.com/ChimeraCoder/anaconda/LICENSE
  16. 110 0
      vendor/github.com/ChimeraCoder/anaconda/README
  17. 1 0
      vendor/github.com/ChimeraCoder/anaconda/README.md
  18. 22 0
      vendor/github.com/ChimeraCoder/anaconda/account.go
  19. 45 0
      vendor/github.com/ChimeraCoder/anaconda/backoff.go
  20. 54 0
      vendor/github.com/ChimeraCoder/anaconda/blocks.go
  21. 32 0
      vendor/github.com/ChimeraCoder/anaconda/configuration.go
  22. 15 0
      vendor/github.com/ChimeraCoder/anaconda/directmessage.go
  23. 57 0
      vendor/github.com/ChimeraCoder/anaconda/directmessages.go
  24. 114 0
      vendor/github.com/ChimeraCoder/anaconda/errors.go
  25. 11 0
      vendor/github.com/ChimeraCoder/anaconda/favorites.go
  26. 289 0
      vendor/github.com/ChimeraCoder/anaconda/friends_followers.go
  27. 57 0
      vendor/github.com/ChimeraCoder/anaconda/geosearch.go
  28. 26 0
      vendor/github.com/ChimeraCoder/anaconda/list.go
  29. 87 0
      vendor/github.com/ChimeraCoder/anaconda/lists.go
  30. 91 0
      vendor/github.com/ChimeraCoder/anaconda/log.go
  31. 89 0
      vendor/github.com/ChimeraCoder/anaconda/media.go
  32. 54 0
      vendor/github.com/ChimeraCoder/anaconda/mutes.go
  33. 59 0
      vendor/github.com/ChimeraCoder/anaconda/oembed.go
  34. 35 0
      vendor/github.com/ChimeraCoder/anaconda/place.go
  35. 30 0
      vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go
  36. 40 0
      vendor/github.com/ChimeraCoder/anaconda/relationship.go
  37. 57 0
      vendor/github.com/ChimeraCoder/anaconda/search.go
  38. 318 0
      vendor/github.com/ChimeraCoder/anaconda/streaming.go
  39. 45 0
      vendor/github.com/ChimeraCoder/anaconda/timeline.go
  40. 64 0
      vendor/github.com/ChimeraCoder/anaconda/trends.go
  41. 154 0
      vendor/github.com/ChimeraCoder/anaconda/tweet.go
  42. 107 0
      vendor/github.com/ChimeraCoder/anaconda/tweets.go
  43. 370 0
      vendor/github.com/ChimeraCoder/anaconda/twitter.go
  44. 74 0
      vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go
  45. 53 0
      vendor/github.com/ChimeraCoder/anaconda/twitter_user.go
  46. 89 0
      vendor/github.com/ChimeraCoder/anaconda/users.go
  47. 78 0
      vendor/github.com/ChimeraCoder/anaconda/webhook.go
  48. 4 0
      vendor/github.com/ChimeraCoder/tokenbucket/.gitignore
  49. 165 0
      vendor/github.com/ChimeraCoder/tokenbucket/COPYING
  50. 1 0
      vendor/github.com/ChimeraCoder/tokenbucket/LICENSE
  51. 48 0
      vendor/github.com/ChimeraCoder/tokenbucket/README
  52. 1 0
      vendor/github.com/ChimeraCoder/tokenbucket/README.md
  53. 86 0
      vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go
  54. 22 0
      vendor/github.com/azr/backoff/.gitignore
  55. 2 0
      vendor/github.com/azr/backoff/.travis.yml
  56. 20 0
      vendor/github.com/azr/backoff/LICENSE
  57. 22 0
      vendor/github.com/azr/backoff/README.md
  58. 51 0
      vendor/github.com/azr/backoff/backoff.go
  59. 112 0
      vendor/github.com/azr/backoff/exponential.go
  60. 44 0
      vendor/github.com/azr/backoff/linear.go
  61. 2 0
      vendor/github.com/dustin/go-jsonpointer/.gitignore
  62. 19 0
      vendor/github.com/dustin/go-jsonpointer/LICENSE
  63. 5 0
      vendor/github.com/dustin/go-jsonpointer/README.markdown
  64. 328 0
      vendor/github.com/dustin/go-jsonpointer/bytes.go
  65. 2 0
      vendor/github.com/dustin/go-jsonpointer/doc.go
  66. 38 0
      vendor/github.com/dustin/go-jsonpointer/map.go
  67. 171 0
      vendor/github.com/dustin/go-jsonpointer/reflect.go
  68. 2 0
      vendor/github.com/dustin/gojson/.gitignore
  69. 27 0
      vendor/github.com/dustin/gojson/LICENSE
  70. 1089 0
      vendor/github.com/dustin/gojson/decode.go
  71. 1183 0
      vendor/github.com/dustin/gojson/encode.go
  72. 143 0
      vendor/github.com/dustin/gojson/fold.go
  73. 137 0
      vendor/github.com/dustin/gojson/indent.go
  74. 629 0
      vendor/github.com/dustin/gojson/scanner.go
  75. 200 0
      vendor/github.com/dustin/gojson/stream.go
  76. 44 0
      vendor/github.com/dustin/gojson/tags.go
  77. 707 0
      vendor/github.com/garyburd/go-oauth/oauth/oauth.go
  78. 13 0
      vendor/github.com/garyburd/go-oauth/oauth/oauth16.go
  79. 12 0
      vendor/github.com/garyburd/go-oauth/oauth/oauth17.go
  80. 56 0
      vendor/golang.org/x/net/context/context.go
  81. 72 0
      vendor/golang.org/x/net/context/go17.go
  82. 20 0
      vendor/golang.org/x/net/context/go19.go
  83. 300 0
      vendor/golang.org/x/net/context/pre_go17.go
  84. 109 0
      vendor/golang.org/x/net/context/pre_go19.go

+ 19 - 0
.travis.yml

@@ -0,0 +1,19 @@
+install:
+- docker --version
+- docker-compose --version
+
+script:
+- docker build
+  --build-arg COVERALLS_TOKEN=$COVERALLS_TOKEN
+  --build-arg CI=$CI
+  --build-arg TRAVIS=$TRAVIS
+  --build-arg TRAVIS_BRANCH=$TRAVIS_BRANCH
+  --build-arg TRAVIS_COMMIT=$TRAVIS_COMMIT
+  --build-arg TRAVIS_JOB_ID=$TRAVIS_JOB_ID
+  --build-arg TRAVIS_JOB_NUMBER=$TRAVIS_JOB_NUMBER
+  --build-arg TRAVIS_OS_NAME=$TRAVIS_OS_NAME
+  --build-arg TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST
+  --build-arg TRAVIS_PULL_REQUEST_SHA=$TRAVIS_PULL_REQUEST_SHA
+  --build-arg TRAVIS_REPO_SLUG=$TRAVIS_REPO_SLUG
+  --build-arg TRAVIS_TAG=$TRAVIS_TAG
+  .

+ 0 - 1
Dockerfile

@@ -49,7 +49,6 @@ COPY --from=build /go/src/github.com/umputun/rss2twitter/rss2twitter /srv/rss2tw
 RUN chown -R app:app /srv
 RUN chmod +x /srv/rss2twitter
 
-EXPOSE 7070
 WORKDIR /srv
 
 CMD ["/srv/rss2twitter"]

+ 51 - 1
Gopkg.lock

@@ -2,6 +2,22 @@
 
 
 [[projects]]
+  digest = "1:2ee1ec060a2e71d2eb24755cb3f3867c52faa9895d2287d3c3bf09b01020cf32"
+  name = "github.com/ChimeraCoder/anaconda"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "9c68684170b980d5b4c2ed08fae9b530e659904d"
+  version = "v2.0.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:088fd1efa8fcd4f85c3e3c0d135f28114115517a2b215f2e39b67146d032ea33"
+  name = "github.com/ChimeraCoder/tokenbucket"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "c5a927568de7aad8a58127d80bcd36ca4e71e454"
+
+[[projects]]
   digest = "1:573fa46f8d413d4bc3f7cc5e86b2c43cb21559f4fb0a19d9874d228e28fdc07c"
   name = "github.com/PuerkitoBio/goquery"
   packages = ["."]
@@ -18,6 +34,14 @@
   version = "v1.0.0"
 
 [[projects]]
+  branch = "master"
+  digest = "1:863cb3d06b96794fd14a54579116b7e704cd8bc395e9d0404a5ad236d3e198b4"
+  name = "github.com/azr/backoff"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "53511d3c733003985b0b76f733df1f4d0095ee6a"
+
+[[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
@@ -26,6 +50,30 @@
   version = "v1.1.1"
 
 [[projects]]
+  branch = "master"
+  digest = "1:efe7dc6d12119a36c1a617b574e99ac9c5ca8fbff57eb283a60462552b7c782a"
+  name = "github.com/dustin/go-jsonpointer"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "ba0abeacc3dcca5b9b20f31509c46794edbc9965"
+
+[[projects]]
+  branch = "master"
+  digest = "1:07263fbfa1c038e890b410f730279aa7adc217180f1c2f78c6dc80b3bbe9e819"
+  name = "github.com/dustin/gojson"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "2e71ec9dd5adce3b168cd0dbde03b5cc04951c30"
+
+[[projects]]
+  branch = "master"
+  digest = "1:b5cfd8d62c46082a75974f2f5e167c272f7f7d47c7a21caf49b17a45a558a601"
+  name = "github.com/garyburd/go-oauth"
+  packages = ["oauth"]
+  pruneopts = "UT"
+  revision = "bca2e7f09a178fd36b034107a00e2323bca6a82e"
+
+[[projects]]
   digest = "1:16ae35b3a854c667baaf55ff5d455c486f7c2baf040a2727f2ef0e4b096b2a95"
   name = "github.com/hashicorp/logutils"
   packages = ["."]
@@ -84,9 +132,10 @@
 
 [[projects]]
   branch = "master"
-  digest = "1:5193d913046443e59093d66a97a40c51f4a5ea4ceba60f3b3ecf89694de5d16f"
+  digest = "1:6d5ed712653ea5321fe3e3475ab2188cf362a4e0d31e9fd3acbd4dfbbca0d680"
   name = "golang.org/x/net"
   packages = [
+    "context",
     "html",
     "html/atom",
     "html/charset",
@@ -124,6 +173,7 @@
   analyzer-name = "dep"
   analyzer-version = 1
   input-imports = [
+    "github.com/ChimeraCoder/anaconda",
     "github.com/hashicorp/logutils",
     "github.com/jessevdk/go-flags",
     "github.com/mmcdole/gofeed",

+ 2 - 2
README.md

@@ -1,2 +1,2 @@
-# rss2twitter
-publish rss updates to twitter
+# rss2twitter - publish RSS updates to twitter
+

+ 8 - 8
app/main.go

@@ -15,14 +15,14 @@ import (
 )
 
 var opts struct {
-	Refresh time.Duration `short:"r" long:"refresh" env:"REFRESH" default:"30" description:"refresh interval"`
-	TimeOut time.Duration `short:"t" long:"timeout" env:"TIMEOUT" default:"5" description:"twitter timeout"`
-	Feed    string        `short:"f" long:"feed" env:"FEED" default:"" description:"rss feed url"`
+	Refresh time.Duration `short:"r" long:"refresh" env:"REFRESH" default:"30s" description:"refresh interval"`
+	TimeOut time.Duration `short:"t" long:"timeout" env:"TIMEOUT" default:"5s" description:"twitter timeout"`
+	Feed    string        `short:"f" long:"feed" env:"FEED" required:"true" description:"rss feed url"`
 
-	ConsumerKey    string `long:"consumer-key" env:"CONSUMER_KEY" default:"" description:"twitter consumer key"`
-	ConsumerSecret string `long:"consumer-secret" env:"CONSUMER_SECRET" default:"" description:"twitter consumer secret"`
-	AccessToken    string `long:"access-token" env:"ACCESS_TOKEN" default:"" description:"twitter access token"`
-	AccessSecret   string `long:"access-secret" env:"ACCESS_SECRET" default:"" description:"twitter access secret"`
+	ConsumerKey    string `long:"consumer-key" env:"CONSUMER_KEY" required:"true" description:"twitter consumer key"`
+	ConsumerSecret string `long:"consumer-secret" env:"CONSUMER_SECRET" required:"true" description:"twitter consumer secret"`
+	AccessToken    string `long:"access-token" env:"ACCESS_TOKEN" required:"true" description:"twitter access token"`
+	AccessSecret   string `long:"access-secret" env:"ACCESS_SECRET" required:"true" description:"twitter access secret"`
 
 	Dbg bool `long:"dbg" env:"DEBUG" description:"debug mode"`
 }
@@ -30,7 +30,7 @@ var opts struct {
 var revision = "unknown"
 
 func main() {
-	fmt.Printf("RSS2TWITTER - %s", revision)
+	fmt.Printf("RSS2TWITTER - %s\n", revision)
 	if _, err := flags.Parse(&opts); err != nil {
 		os.Exit(1)
 	}

+ 1 - 2
app/publisher/publisher.go

@@ -3,7 +3,6 @@ package publisher
 import (
 	"log"
 	"net/url"
-	"time"
 
 	"github.com/ChimeraCoder/anaconda"
 	"github.com/umputun/rss2twitter/app/rss"
@@ -31,8 +30,8 @@ type Twitter struct {
 
 // Publish to twitter
 func (t Twitter) Publish(event rss.Event, formatter func(rss.Event) string) error {
+	log.Printf("[INFO] publish %+v", event)
 	api := anaconda.NewTwitterApiWithCredentials(t.AccessToken, t.AccessSecret, t.ConsumerKey, t.ConsumerSecret)
-	api.SetDelay(5 * time.Second)
 	_, err := api.PostTweet(formatter(event), url.Values{})
 	return err
 }

+ 2 - 0
app/rss/notify.go

@@ -30,6 +30,7 @@ type Event struct {
 func New(ctx context.Context, feed string, duration time.Duration) *Notify {
 	res := Notify{feed: feed, duration: duration}
 	res.ctx, res.cancel = context.WithCancel(ctx)
+	log.Printf("[INFO] crate notifier for %q, %s", feed, duration)
 	return &res
 }
 
@@ -54,6 +55,7 @@ func (n *Notify) Go() <-chan Event {
 			event := n.feedEvent(feedData)
 			if lastGUID != event.guid {
 				if lastGUID != "" {
+					log.Printf("[DEBUG] new event %s", event.guid)
 					ch <- event
 				}
 				lastGUID = event.guid

+ 22 - 0
docker-compose.yml

@@ -0,0 +1,22 @@
+version: '2'
+
+services:
+
+  rss2twitter:
+    image: umputun/rss2twitter:latest
+    container_name: nginx
+    hostname: nginx
+    restart: always
+
+    logging: &default_logging
+      driver: json-file
+      options:
+        max-size: "10m"
+        max-file: "5"
+
+    environment:
+      - FEED=http://lorem-rss.herokuapp.com/feed?unit=second&interval=10
+      - CONSUMER_KEY
+      - CONSUMER_SECRET
+      - ACCESS_TOKEN
+      - ACCESS_SECRET

+ 8 - 0
vendor/github.com/ChimeraCoder/anaconda/.appveyor.yml

@@ -0,0 +1,8 @@
+clone_folder: c:\gopath\src\github.com\ChimeraCoder\anaconda
+
+environment:
+    GOPATH: c:\gopath
+
+build_script:
+    - go get .
+    - go test -race -v -timeout 120s

+ 6 - 0
vendor/github.com/ChimeraCoder/anaconda/.gitignore

@@ -0,0 +1,6 @@
+*.swp
+*.swo
+*.swn
+conf.sh
+*.patch
+anaconda.test

+ 36 - 0
vendor/github.com/ChimeraCoder/anaconda/.travis.yml

@@ -0,0 +1,36 @@
+language: go
+
+os:
+  - linux
+  - osx
+
+install:
+  # Use gofmt from Go 1.9 for the pre-build check on all builds
+  - if [ "$TRAVIS_OS_NAME" = "osx" ]; then wget -O go.tar.gz https://storage.googleapis.com/golang/go1.9.darwin-amd64.tar.gz; else wget -O go.tar.gz https://storage.googleapis.com/golang/go1.9.linux-amd64.tar.gz; fi
+  - tar -C /tmp -xvf go.tar.gz go/bin/gofmt
+  - rm go.tar.gz
+
+before_script:
+  - /tmp/go/bin/gofmt -w .
+
+  # If `go generate` or `gofmt` yielded any changes,
+  # this will fail with an error message like "too many arguments"
+  # or "M: binary operator expected" and show the diff.
+  - git diff
+  - git add .
+  - git diff-index --cached --exit-code HEAD
+
+go:
+  - 1.7
+  - 1.8
+  - 1.9
+  - tip
+
+matrix:
+    include:
+        - os: linux
+          go: 1.6
+
+script:
+  - echo $TRAVIS_GO_VERSION
+  - if [ "$TRAVIS_GO_VERSION" == "1.6" ] || [ "$TRAVIS_GO_VERSION" == "1.7" ] || [ "$TRAVIS_GO_VERSION" == "1.8" ]; then go list ./... | grep -v vendor | xargs go test -race -v -timeout 60s; else go test -race -v -timeout 60s ./...; fi

+ 1 - 0
vendor/github.com/ChimeraCoder/anaconda/COPYING

@@ -0,0 +1 @@
+LICENSE

+ 39 - 0
vendor/github.com/ChimeraCoder/anaconda/Gopkg.lock

@@ -0,0 +1,39 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  branch = "master"
+  name = "github.com/ChimeraCoder/tokenbucket"
+  packages = ["."]
+  revision = "c5a927568de7aad8a58127d80bcd36ca4e71e454"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/azr/backoff"
+  packages = ["."]
+  revision = "53511d3c733003985b0b76f733df1f4d0095ee6a"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/dustin/go-jsonpointer"
+  packages = ["."]
+  revision = "ba0abeacc3dcca5b9b20f31509c46794edbc9965"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/dustin/gojson"
+  packages = ["."]
+  revision = "2e71ec9dd5adce3b168cd0dbde03b5cc04951c30"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/garyburd/go-oauth"
+  packages = ["oauth"]
+  revision = "166ce8d672783fbb5a72247c3cf459267717e1ec"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "e645e975b86556d43a1fb9a6aacbaa500a8549d3262d8421baca41f04ae42f4f"
+  solver-name = "gps-cdcl"
+  solver-version = 1

+ 38 - 0
vendor/github.com/ChimeraCoder/anaconda/Gopkg.toml

@@ -0,0 +1,38 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/ChimeraCoder/tokenbucket"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/azr/backoff"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/dustin/go-jsonpointer"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/garyburd/go-oauth"

+ 7 - 0
vendor/github.com/ChimeraCoder/anaconda/LICENSE

@@ -0,0 +1,7 @@
+Copyright (c) 2013 Aditya Mukerjee, Quotidian Ventures
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE

+ 110 - 0
vendor/github.com/ChimeraCoder/anaconda/README

@@ -0,0 +1,110 @@
+Anaconda
+====================
+
+[![Build Status](https://travis-ci.org/ChimeraCoder/anaconda.svg?branch=master)](https://travis-ci.org/ChimeraCoder/anaconda) [![Build Status](https://ci.appveyor.com/api/projects/status/63pi6csod8bps80i/branch/master?svg=true)](https://ci.appveyor.com/project/ChimeraCoder/anaconda/branch/master) [![GoDoc](https://godoc.org/github.com/ChimeraCoder/anaconda?status.svg)](https://godoc.org/github.com/ChimeraCoder/anaconda)
+
+Anaconda is a simple, transparent Go package for accessing version 1.1 of the Twitter API.
+
+Successful API queries return native Go structs that can be used immediately, with no need for type assertions.
+
+
+
+Examples
+-------------
+
+### Authentication
+
+If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple:
+
+````go
+api := anaconda.NewTwitterApiWithCredentials("your-access-token", "your-access-token-secret", "your-consumer-key", "your-consumer-secret")
+````
+
+### Queries
+
+Queries are conducted using a pointer to an authenticated `TwitterApi` struct. In v1.1 of Twitter's API, all requests should be authenticated.
+
+````go
+searchResult, _ := api.GetSearch("golang", nil)
+for _ , tweet := range searchResult.Statuses {
+    fmt.Println(tweet.Text)
+}
+````
+Certain endpoints allow separate optional parameters; if desired, these can be passed as the final parameter.
+
+````go
+//Perhaps we want 30 values instead of the default 15
+v := url.Values{}
+v.Set("count", "30")
+result, err := api.GetSearch("golang", v)
+````
+
+(Remember that `url.Values` is equivalent to a `map[string][]string`, if you find that more convenient notation when specifying values). Otherwise, `nil` suffices.
+
+
+
+Endpoints
+------------
+
+Anaconda implements most of the endpoints defined in the [Twitter API documentation](https://developer.twitter.com/en/docs). For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`).
+
+In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`)
+
+
+
+Error Handling, Rate Limiting, and Throttling
+---------------------------------
+
+### Error Handling
+
+Twitter errors are returned as an `ApiError`, which satisfies the `error` interface and can be treated as a vanilla `error`. However, it also contains the additional information returned by the Twitter API that may be useful in deciding how to proceed after encountering an error.
+
+
+If you make queries too quickly, you may bump against Twitter's [rate limits](https://developer.twitter.com/en/docs/basics/rate-limits). If this happens, `anaconda` automatically retries the query when the rate limit resets, using the `X-Rate-Limit-Reset` header that Twitter provides to determine how long to wait.
+
+In other words, users of the `anaconda` library should not need to handle rate limiting errors themselves; this is handled seamlessly behind-the-scenes. If an error is returned by a function, another form of error must have occurred (which can be checked by using the fields provided by the `ApiError` struct).
+
+
+(If desired, this feature can be turned off by calling `ReturnRateLimitError(true)`.)
+
+
+### Throttling
+
+Anaconda now supports automatic client-side throttling of queries to avoid hitting the Twitter rate-limit.
+
+This is currently *off* by default; however, it may be turned on by default in future versions of the library, as the implementation is improved.
+
+
+To set a delay between queries, use the `SetDelay` method:
+
+````go
+    api.SetDelay(10 * time.Second)
+````
+
+Delays are set specific to each `TwitterApi` struct, so queries that use different users' access credentials are completely independent.
+
+
+To turn off automatic throttling, set the delay to `0`:
+
+````go
+    api.SetDelay(0 * time.Second)
+````
+
+### Query Queue Persistence
+
+If your code creates a NewTwitterApi in a regularly called function, you'll need to call `.Close()` on the API struct to clear the queryQueue and allow the goroutine to exit. Otherwise you could see goroutine and therefore heap memory leaks in long-running applications.
+
+### Google App Engine
+
+Since Google App Engine doesn't make the standard `http.Transport` available, it's necessary to tell Anaconda to use a different client context.
+
+````go
+	api = anaconda.NewTwitterApi("", "")
+	c := appengine.NewContext(r)
+	api.HttpClient.Transport = &urlfetch.Transport{Context: c}
+````
+
+
+License
+-----------
+Anaconda is free software licensed under the MIT/X11 license. Details provided in the LICENSE file.

+ 1 - 0
vendor/github.com/ChimeraCoder/anaconda/README.md

@@ -0,0 +1 @@
+README

+ 22 - 0
vendor/github.com/ChimeraCoder/anaconda/account.go

@@ -0,0 +1,22 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
+// Verify the credentials by making a very small request
+func (a TwitterApi) VerifyCredentials() (ok bool, err error) {
+	v := cleanValues(nil)
+	v.Set("include_entities", "false")
+	v.Set("skip_status", "true")
+
+	_, err = a.GetSelf(v)
+	return err == nil, err
+}
+
+// Get the user object for the authenticated user. Requests /account/verify_credentials
+func (a TwitterApi) GetSelf(v url.Values) (u User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/account/verify_credentials.json", v, &u, _GET, response_ch}
+	return u, (<-response_ch).err
+}

+ 45 - 0
vendor/github.com/ChimeraCoder/anaconda/backoff.go

@@ -0,0 +1,45 @@
+package anaconda
+
+import (
+	"time"
+
+	"github.com/azr/backoff"
+)
+
+/*
+Reconnecting(from https://developer.twitter.com/en/docs/tutorials/consuming-streaming-data) :
+
+Once an established connection drops, attempt to reconnect immediately.
+If the reconnect fails, slow down your reconnect attempts according to the type of error experienced:
+*/
+
+//Back off linearly for TCP/IP level network errors.
+//	These problems are generally temporary and tend to clear quickly.
+//	Increase the delay in reconnects by 250ms each attempt, up to 16 seconds.
+func NewTCPIPErrBackoff() backoff.Interface {
+	return backoff.NewLinear(0, time.Second*16, time.Millisecond*250, 1)
+}
+
+//Back off exponentially for HTTP errors for which reconnecting would be appropriate.
+//	Start with a 5 second wait, doubling each attempt, up to 320 seconds.
+func NewHTTPErrBackoff() backoff.Interface {
+	eb := backoff.NewExponential()
+	eb.InitialInterval = time.Second * 5
+	eb.MaxInterval = time.Second * 320
+	eb.Multiplier = 2
+	eb.Reset()
+	return eb
+}
+
+// Back off exponentially for HTTP 420 errors.
+// 	Start with a 1 minute wait and double each attempt.
+// 	Note that every HTTP 420 received increases the time you must
+// 	wait until rate limiting will no longer will be in effect for your account.
+func NewHTTP420ErrBackoff() backoff.Interface {
+	eb := backoff.NewExponential()
+	eb.InitialInterval = time.Minute * 1
+	eb.Multiplier = 2
+	eb.MaxInterval = time.Minute * 20
+	eb.Reset()
+	return eb
+}

+ 54 - 0
vendor/github.com/ChimeraCoder/anaconda/blocks.go

@@ -0,0 +1,54 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
+func (a TwitterApi) GetBlocksList(v url.Values) (c UserCursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/blocks/list.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetBlocksIds(v url.Values) (c Cursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/blocks/ids.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) BlockUser(screenName string, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("screen_name", screenName)
+	return a.Block(v)
+}
+
+func (a TwitterApi) BlockUserId(id int64, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	return a.Block(v)
+}
+
+func (a TwitterApi) Block(v url.Values) (user User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/blocks/create.json", v, &user, _POST, response_ch}
+	return user, (<-response_ch).err
+}
+
+func (a TwitterApi) UnblockUser(screenName string, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("screen_name", screenName)
+	return a.Unblock(v)
+}
+
+func (a TwitterApi) UnblockUserId(id int64, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	return a.Unblock(v)
+}
+
+func (a TwitterApi) Unblock(v url.Values) (user User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/blocks/destroy.json", v, &user, _POST, response_ch}
+	return user, (<-response_ch).err
+}

+ 32 - 0
vendor/github.com/ChimeraCoder/anaconda/configuration.go

@@ -0,0 +1,32 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
+type Configuration struct {
+	CharactersReservedPerMedia int      `json:"characters_reserved_per_media"`
+	MaxMediaPerUpload          int      `json:"max_media_per_upload"`
+	NonUsernamePaths           []string `json:"non_username_paths"`
+	PhotoSizeLimit             int      `json:"photo_size_limit"`
+	PhotoSizes                 struct {
+		Thumb  photoSize `json:"thumb"`
+		Small  photoSize `json:"small"`
+		Medium photoSize `json:"medium"`
+		Large  photoSize `json:"large"`
+	} `json:"photo_sizes"`
+	ShortUrlLength      int `json:"short_url_length"`
+	ShortUrlLengthHttps int `json:"short_url_length_https"`
+}
+
+type photoSize struct {
+	H      int    `json:"h"`
+	W      int    `json:"w"`
+	Resize string `json:"resize"`
+}
+
+func (a TwitterApi) GetConfiguration(v url.Values) (conf Configuration, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/help/configuration.json", v, &conf, _GET, response_ch}
+	return conf, (<-response_ch).err
+}

+ 15 - 0
vendor/github.com/ChimeraCoder/anaconda/directmessage.go

@@ -0,0 +1,15 @@
+package anaconda
+
+type DirectMessage struct {
+	CreatedAt           string   `json:"created_at"`
+	Entities            Entities `json:"entities"`
+	Id                  int64    `json:"id"`
+	IdStr               string   `json:"id_str"`
+	Recipient           User     `json:"recipient"`
+	RecipientId         int64    `json:"recipient_id"`
+	RecipientScreenName string   `json:"recipient_screen_name"`
+	Sender              User     `json:"sender"`
+	SenderId            int64    `json:"sender_id"`
+	SenderScreenName    string   `json:"sender_screen_name"`
+	Text                string   `json:"text"`
+}

+ 57 - 0
vendor/github.com/ChimeraCoder/anaconda/directmessages.go

@@ -0,0 +1,57 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
+func (a TwitterApi) GetDirectMessages(v url.Values) (messages []DirectMessage, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/direct_messages.json", v, &messages, _GET, response_ch}
+	return messages, (<-response_ch).err
+}
+
+func (a TwitterApi) GetDirectMessagesSent(v url.Values) (messages []DirectMessage, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/direct_messages/sent.json", v, &messages, _GET, response_ch}
+	return messages, (<-response_ch).err
+}
+
+func (a TwitterApi) GetDirectMessagesShow(v url.Values) (message DirectMessage, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/direct_messages/show.json", v, &message, _GET, response_ch}
+	return message, (<-response_ch).err
+}
+
+// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-message
+func (a TwitterApi) PostDMToScreenName(text, screenName string) (message DirectMessage, err error) {
+	v := url.Values{}
+	v.Set("screen_name", screenName)
+	v.Set("text", text)
+	return a.postDirectMessagesImpl(v)
+}
+
+// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/new-message
+func (a TwitterApi) PostDMToUserId(text string, userId int64) (message DirectMessage, err error) {
+	v := url.Values{}
+	v.Set("user_id", strconv.FormatInt(userId, 10))
+	v.Set("text", text)
+	return a.postDirectMessagesImpl(v)
+}
+
+// DeleteDirectMessage will destroy (delete) the direct message with the specified ID.
+// https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/delete-message
+func (a TwitterApi) DeleteDirectMessage(id int64, includeEntities bool) (message DirectMessage, err error) {
+	v := url.Values{}
+	v.Set("id", strconv.FormatInt(id, 10))
+	v.Set("include_entities", strconv.FormatBool(includeEntities))
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/direct_messages/destroy.json", v, &message, _POST, response_ch}
+	return message, (<-response_ch).err
+}
+
+func (a TwitterApi) postDirectMessagesImpl(v url.Values) (message DirectMessage, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/direct_messages/new.json", v, &message, _POST, response_ch}
+	return message, (<-response_ch).err
+}

+ 114 - 0
vendor/github.com/ChimeraCoder/anaconda/errors.go

@@ -0,0 +1,114 @@
+package anaconda
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+const (
+	//Error code defintions match the Twitter documentation
+	//https://developer.twitter.com/en/docs/basics/response-codes
+	TwitterErrorCouldNotAuthenticate    = 32
+	TwitterErrorDoesNotExist            = 34
+	TwitterErrorAccountSuspended        = 64
+	TwitterErrorApi1Deprecation         = 68 //This should never be needed
+	TwitterErrorRateLimitExceeded       = 88
+	TwitterErrorInvalidToken            = 89
+	TwitterErrorOverCapacity            = 130
+	TwitterErrorInternalError           = 131
+	TwitterErrorCouldNotAuthenticateYou = 135
+	TwitterErrorStatusIsADuplicate      = 187
+	TwitterErrorBadAuthenticationData   = 215
+	TwitterErrorUserMustVerifyLogin     = 231
+
+	// Undocumented by Twitter, but may be returned instead of 34
+	TwitterErrorDoesNotExist2 = 144
+)
+
+type ApiError struct {
+	StatusCode int
+	Header     http.Header
+	Body       string
+	Decoded    TwitterErrorResponse
+	URL        *url.URL
+}
+
+func newApiError(resp *http.Response) *ApiError {
+	// TODO don't ignore this error
+	// TODO don't use ReadAll
+	p, _ := ioutil.ReadAll(resp.Body)
+
+	var twitterErrorResp TwitterErrorResponse
+	_ = json.Unmarshal(p, &twitterErrorResp)
+	return &ApiError{
+		StatusCode: resp.StatusCode,
+		Header:     resp.Header,
+		Body:       string(p),
+		Decoded:    twitterErrorResp,
+		URL:        resp.Request.URL,
+	}
+}
+
+// ApiError supports the error interface
+func (aerr ApiError) Error() string {
+	return fmt.Sprintf("Get %s returned status %d, %s", aerr.URL, aerr.StatusCode, aerr.Body)
+}
+
+// Check to see if an error is a Rate Limiting error. If so, find the next available window in the header.
+// Use like so:
+//
+//    if aerr, ok := err.(*ApiError); ok {
+//  	  if isRateLimitError, nextWindow := aerr.RateLimitCheck(); isRateLimitError {
+//       	<-time.After(nextWindow.Sub(time.Now()))
+//  	  }
+//    }
+//
+func (aerr *ApiError) RateLimitCheck() (isRateLimitError bool, nextWindow time.Time) {
+	// TODO  check for error code 130, which also signifies a rate limit
+	if aerr.StatusCode == 429 {
+		if reset := aerr.Header.Get("X-Rate-Limit-Reset"); reset != "" {
+			if resetUnix, err := strconv.ParseInt(reset, 10, 64); err == nil {
+				resetTime := time.Unix(resetUnix, 0)
+				// Reject any time greater than an hour away
+				if resetTime.Sub(time.Now()) > time.Hour {
+					return true, time.Now().Add(15 * time.Minute)
+				}
+
+				return true, resetTime
+			}
+		}
+	}
+
+	return false, time.Time{}
+}
+
+//TwitterErrorResponse has an array of Twitter error messages
+//It satisfies the "error" interface
+//For the most part, Twitter seems to return only a single error message
+//Currently, we assume that this always contains exactly one error message
+type TwitterErrorResponse struct {
+	Errors []TwitterError `json:"errors"`
+}
+
+func (tr TwitterErrorResponse) First() error {
+	return tr.Errors[0]
+}
+
+func (tr TwitterErrorResponse) Error() string {
+	return tr.Errors[0].Message
+}
+
+//TwitterError represents a single Twitter error messages/code pair
+type TwitterError struct {
+	Message string `json:"message"`
+	Code    int    `json:"code"`
+}
+
+func (te TwitterError) Error() string {
+	return te.Message
+}

+ 11 - 0
vendor/github.com/ChimeraCoder/anaconda/favorites.go

@@ -0,0 +1,11 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
+func (a TwitterApi) GetFavorites(v url.Values) (favorites []Tweet, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/favorites/list.json", v, &favorites, _GET, response_ch}
+	return favorites, (<-response_ch).err
+}

+ 289 - 0
vendor/github.com/ChimeraCoder/anaconda/friends_followers.go

@@ -0,0 +1,289 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
+type Cursor struct {
+	Previous_cursor     int64
+	Previous_cursor_str string
+
+	Ids []int64
+
+	Next_cursor     int64
+	Next_cursor_str string
+}
+
+type UserCursor struct {
+	Previous_cursor     int64
+	Previous_cursor_str string
+	Next_cursor         int64
+	Next_cursor_str     string
+	Users               []User
+}
+
+type FriendsIdsCursor struct {
+	Previous_cursor     int64
+	Previous_cursor_str string
+	Next_cursor         int64
+	Next_cursor_str     string
+	Ids                 []int64
+}
+
+type FriendsIdsPage struct {
+	Ids   []int64
+	Error error
+}
+
+type Friendship struct {
+	Name        string
+	Id_str      string
+	Id          int64
+	Connections []string
+	Screen_name string
+}
+
+type FollowersPage struct {
+	Followers []User
+	Error     error
+}
+
+type FriendsPage struct {
+	Friends []User
+	Error   error
+}
+
+// FIXME: Might want to consolidate this with FriendsIdsPage and just
+//		  have "UserIdsPage".
+type FollowersIdsPage struct {
+	Ids   []int64
+	Error error
+}
+
+// GetFriendshipsNoRetweets returns a collection of user_ids that the currently authenticated user does not want to receive retweets from.
+// It does not currently support the stringify_ids parameter.
+func (a TwitterApi) GetFriendshipsNoRetweets() (ids []int64, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/no_retweets/ids.json", nil, &ids, _GET, response_ch}
+	return ids, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFollowersIds(v url.Values) (c Cursor, err error) {
+	err = a.apiGet(a.baseUrl+"/followers/ids.json", v, &c)
+	return
+}
+
+// Like GetFollowersIds, but returns a channel instead of a cursor and pre-fetches the remaining results
+// This channel is closed once all values have been fetched
+func (a TwitterApi) GetFollowersIdsAll(v url.Values) (result chan FollowersIdsPage) {
+	result = make(chan FollowersIdsPage)
+
+	v = cleanValues(v)
+	go func(a TwitterApi, v url.Values, result chan FollowersIdsPage) {
+		// Cursor defaults to the first page ("-1")
+		next_cursor := "-1"
+		for {
+			v.Set("cursor", next_cursor)
+			c, err := a.GetFollowersIds(v)
+
+			// throttledQuery() handles all rate-limiting errors
+			// if GetFollowersList() returns an error, it must be a different kind of error
+
+			result <- FollowersIdsPage{c.Ids, err}
+
+			next_cursor = c.Next_cursor_str
+			if err != nil || next_cursor == "0" {
+				close(result)
+				break
+			}
+		}
+	}(a, v, result)
+	return result
+}
+
+func (a TwitterApi) GetFriendsIds(v url.Values) (c Cursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friends/ids.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFriendshipsLookup(v url.Values) (friendships []Friendship, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/lookup.json", v, &friendships, _GET, response_ch}
+	return friendships, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFriendshipsIncoming(v url.Values) (c Cursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/incoming.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFriendshipsOutgoing(v url.Values) (c Cursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/outgoing.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFollowersList(v url.Values) (c UserCursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/followers/list.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetFriendsList(v url.Values) (c UserCursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friends/list.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+// Like GetFriendsList, but returns a channel instead of a cursor and pre-fetches the remaining results
+// This channel is closed once all values have been fetched
+func (a TwitterApi) GetFriendsListAll(v url.Values) (result chan FriendsPage) {
+	result = make(chan FriendsPage)
+
+	v = cleanValues(v)
+	go func(a TwitterApi, v url.Values, result chan FriendsPage) {
+		// Cursor defaults to the first page ("-1")
+		next_cursor := "-1"
+		for {
+			v.Set("cursor", next_cursor)
+			c, err := a.GetFriendsList(v)
+
+			// throttledQuery() handles all rate-limiting errors
+			// if GetFriendsListAll() returns an error, it must be a different kind of error
+
+			result <- FriendsPage{c.Users, err}
+
+			next_cursor = c.Next_cursor_str
+			if err != nil || next_cursor == "0" {
+				close(result)
+				break
+			}
+		}
+	}(a, v, result)
+	return result
+}
+
+// Like GetFollowersList, but returns a channel instead of a cursor and pre-fetches the remaining results
+// This channel is closed once all values have been fetched
+func (a TwitterApi) GetFollowersListAll(v url.Values) (result chan FollowersPage) {
+	result = make(chan FollowersPage)
+
+	v = cleanValues(v)
+	go func(a TwitterApi, v url.Values, result chan FollowersPage) {
+		// Cursor defaults to the first page ("-1")
+		next_cursor := "-1"
+		for {
+			v.Set("cursor", next_cursor)
+			c, err := a.GetFollowersList(v)
+
+			// throttledQuery() handles all rate-limiting errors
+			// if GetFollowersList() returns an error, it must be a different kind of error
+
+			result <- FollowersPage{c.Users, err}
+
+			next_cursor = c.Next_cursor_str
+			if err != nil || next_cursor == "0" {
+				close(result)
+				break
+			}
+		}
+	}(a, v, result)
+	return result
+}
+
+func (a TwitterApi) GetFollowersUser(id int64, v url.Values) (c Cursor, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/followers/ids.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+// Like GetFriendsIds, but returns a channel instead of a cursor and pre-fetches the remaining results
+// This channel is closed once all values have been fetched
+func (a TwitterApi) GetFriendsIdsAll(v url.Values) (result chan FriendsIdsPage) {
+	result = make(chan FriendsIdsPage)
+
+	v = cleanValues(v)
+	go func(a TwitterApi, v url.Values, result chan FriendsIdsPage) {
+		// Cursor defaults to the first page ("-1")
+		next_cursor := "-1"
+		for {
+			v.Set("cursor", next_cursor)
+			c, err := a.GetFriendsIds(v)
+
+			// throttledQuery() handles all rate-limiting errors
+			// if GetFollowersList() returns an error, it must be a different kind of error
+
+			result <- FriendsIdsPage{c.Ids, err}
+
+			next_cursor = c.Next_cursor_str
+			if err != nil || next_cursor == "0" {
+				close(result)
+				break
+			}
+		}
+	}(a, v, result)
+	return result
+}
+
+func (a TwitterApi) GetFriendsUser(id int64, v url.Values) (c Cursor, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friends/ids.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+// FollowUserId follows the user with the specified userId.
+// This implements the /friendships/create endpoint, though the function name
+// uses the terminology 'follow' as this is most consistent with colloquial Twitter terminology.
+func (a TwitterApi) FollowUserId(userId int64, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(userId, 10))
+	return a.postFriendshipsCreateImpl(v)
+}
+
+// FollowUserId follows the user with the specified screenname (username).
+// This implements the /friendships/create endpoint, though the function name
+// uses the terminology 'follow' as this is most consistent with colloquial Twitter terminology.
+func (a TwitterApi) FollowUser(screenName string) (user User, err error) {
+	v := url.Values{}
+	v.Set("screen_name", screenName)
+	return a.postFriendshipsCreateImpl(v)
+}
+
+func (a TwitterApi) postFriendshipsCreateImpl(v url.Values) (user User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/create.json", v, &user, _POST, response_ch}
+	return user, (<-response_ch).err
+}
+
+// UnfollowUserId unfollows the user with the specified userId.
+// This implements the /friendships/destroy endpoint, though the function name
+// uses the terminology 'unfollow' as this is most consistent with colloquial Twitter terminology.
+func (a TwitterApi) UnfollowUserId(userId int64) (u User, err error) {
+	v := url.Values{}
+	v.Set("user_id", strconv.FormatInt(userId, 10))
+	// Set other values before calling this method:
+	// page, count, include_entities
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/destroy.json", v, &u, _POST, response_ch}
+	return u, (<-response_ch).err
+}
+
+// UnfollowUser unfollows the user with the specified screenname (username)
+// This implements the /friendships/destroy endpoint, though the function name
+// uses the terminology 'unfollow' as this is most consistent with colloquial Twitter terminology.
+func (a TwitterApi) UnfollowUser(screenname string) (u User, err error) {
+	v := url.Values{}
+	v.Set("screen_name", screenname)
+	// Set other values before calling this method:
+	// page, count, include_entities
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/destroy.json", v, &u, _POST, response_ch}
+	return u, (<-response_ch).err
+}

+ 57 - 0
vendor/github.com/ChimeraCoder/anaconda/geosearch.go

@@ -0,0 +1,57 @@
+package anaconda
+
+import "net/url"
+
+type GeoSearchResult struct {
+	Result struct {
+		Places []struct {
+			ID              string `json:"id"`
+			URL             string `json:"url"`
+			PlaceType       string `json:"place_type"`
+			Name            string `json:"name"`
+			FullName        string `json:"full_name"`
+			CountryCode     string `json:"country_code"`
+			Country         string `json:"country"`
+			ContainedWithin []struct {
+				ID          string    `json:"id"`
+				URL         string    `json:"url"`
+				PlaceType   string    `json:"place_type"`
+				Name        string    `json:"name"`
+				FullName    string    `json:"full_name"`
+				CountryCode string    `json:"country_code"`
+				Country     string    `json:"country"`
+				Centroid    []float64 `json:"centroid"`
+				BoundingBox struct {
+					Type        string        `json:"type"`
+					Coordinates [][][]float64 `json:"coordinates"`
+				} `json:"bounding_box"`
+				Attributes struct {
+				} `json:"attributes"`
+			} `json:"contained_within"`
+			Centroid    []float64 `json:"centroid"`
+			BoundingBox struct {
+				Type        string        `json:"type"`
+				Coordinates [][][]float64 `json:"coordinates"`
+			} `json:"bounding_box"`
+			Attributes struct {
+			} `json:"attributes"`
+		} `json:"places"`
+	} `json:"result"`
+	Query struct {
+		URL    string `json:"url"`
+		Type   string `json:"type"`
+		Params struct {
+			Accuracy     float64 `json:"accuracy"`
+			Granularity  string  `json:"granularity"`
+			Query        string  `json:"query"`
+			Autocomplete bool    `json:"autocomplete"`
+			TrimPlace    bool    `json:"trim_place"`
+		} `json:"params"`
+	} `json:"query"`
+}
+
+func (a TwitterApi) GeoSearch(v url.Values) (c GeoSearchResult, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/geo/search.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}

+ 26 - 0
vendor/github.com/ChimeraCoder/anaconda/list.go

@@ -0,0 +1,26 @@
+package anaconda
+
+type ListResponse struct {
+	PreviousCursor int    `json:"previous_cursor"`
+	NextCursor     int    `json:"next_cursor"`
+	Lists          []List `json:"lists"`
+}
+
+type AddUserToListResponse struct {
+	Users []User `json:"users"`
+}
+
+type List struct {
+	Slug            string `json:"slug"`
+	Name            string `json:"name"`
+	URL             string `json:"uri"`
+	CreatedAt       string `json:"created_at"`
+	Id              int64  `json:"id"`
+	SubscriberCount int64  `json:"subscriber_count"`
+	MemberCount     int64  `json:"member_count"`
+	Mode            string `json:"mode"`
+	FullName        string `json:"full_name"`
+	Description     string `json:"description"`
+	User            User   `json:"user"`
+	Following       bool   `json:"following"`
+}

+ 87 - 0
vendor/github.com/ChimeraCoder/anaconda/lists.go

@@ -0,0 +1,87 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+	"strings"
+)
+
+// CreateList implements /lists/create.json
+func (a TwitterApi) CreateList(name, description string, v url.Values) (list List, err error) {
+	v = cleanValues(v)
+	v.Set("name", name)
+	v.Set("description", description)
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/create.json", v, &list, _POST, response_ch}
+	return list, (<-response_ch).err
+}
+
+// AddUserToList implements /lists/members/create.json
+func (a TwitterApi) AddUserToList(screenName string, listID int64, v url.Values) (users []User, err error) {
+	v = cleanValues(v)
+	v.Set("list_id", strconv.FormatInt(listID, 10))
+	v.Set("screen_name", screenName)
+
+	var addUserToListResponse AddUserToListResponse
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/members/create.json", v, &addUserToListResponse, _POST, response_ch}
+	return addUserToListResponse.Users, (<-response_ch).err
+}
+
+// AddMultipleUsersToList implements /lists/members/create_all.json
+func (a TwitterApi) AddMultipleUsersToList(screenNames []string, listID int64, v url.Values) (list List, err error) {
+	v = cleanValues(v)
+	v.Set("list_id", strconv.FormatInt(listID, 10))
+	v.Set("screen_name", strings.Join(screenNames, ","))
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/members/create_all.json", v, &list, _POST, response_ch}
+	r := <-response_ch
+	return list, r.err
+}
+
+// GetListsOwnedBy implements /lists/ownerships.json
+// screen_name, count, and cursor are all optional values
+func (a TwitterApi) GetListsOwnedBy(userID int64, v url.Values) (lists []List, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(userID, 10))
+
+	var listResponse ListResponse
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/ownerships.json", v, &listResponse, _GET, response_ch}
+	return listResponse.Lists, (<-response_ch).err
+}
+
+func (a TwitterApi) GetListTweets(listID int64, includeRTs bool, v url.Values) (tweets []Tweet, err error) {
+	v = cleanValues(v)
+	v.Set("list_id", strconv.FormatInt(listID, 10))
+	v.Set("include_rts", strconv.FormatBool(includeRTs))
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/statuses.json", v, &tweets, _GET, response_ch}
+	return tweets, (<-response_ch).err
+}
+
+// GetList implements /lists/show.json
+func (a TwitterApi) GetList(listID int64, v url.Values) (list List, err error) {
+	v = cleanValues(v)
+	v.Set("list_id", strconv.FormatInt(listID, 10))
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/show.json", v, &list, _GET, response_ch}
+	return list, (<-response_ch).err
+}
+
+func (a TwitterApi) GetListTweetsBySlug(slug string, ownerScreenName string, includeRTs bool, v url.Values) (tweets []Tweet, err error) {
+	v = cleanValues(v)
+	v.Set("slug", slug)
+	v.Set("owner_screen_name", ownerScreenName)
+	v.Set("include_rts", strconv.FormatBool(includeRTs))
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/lists/statuses.json", v, &tweets, _GET, response_ch}
+	return tweets, (<-response_ch).err
+}

+ 91 - 0
vendor/github.com/ChimeraCoder/anaconda/log.go

@@ -0,0 +1,91 @@
+package anaconda
+
+import (
+	"log"
+	"os"
+)
+
+// The Logger interface provides optional logging ability for the streaming API.
+// It can also be used to log the rate limiting headers if desired.
+type Logger interface {
+	Fatal(args ...interface{})
+	Fatalf(format string, args ...interface{})
+
+	Panic(args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	// Log functions
+	Critical(args ...interface{})
+	Criticalf(format string, args ...interface{})
+
+	Error(args ...interface{})
+	Errorf(format string, args ...interface{})
+
+	Warning(args ...interface{})
+	Warningf(format string, args ...interface{})
+
+	Notice(args ...interface{})
+	Noticef(format string, args ...interface{})
+
+	Info(args ...interface{})
+	Infof(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Debugf(format string, args ...interface{})
+}
+
+// SetLogger sets the Logger used by the API client.
+// The default logger is silent. BasicLogger will log to STDERR
+// using the log package from the standard library.
+func (c *TwitterApi) SetLogger(l Logger) {
+	c.Log = l
+}
+
+type silentLogger struct {
+}
+
+func (_ silentLogger) Fatal(_ ...interface{})                 {}
+func (_ silentLogger) Fatalf(_ string, _ ...interface{})      {}
+func (_ silentLogger) Panic(_ ...interface{})                 {}
+func (_ silentLogger) Panicf(_ string, _ ...interface{})      {}
+func (_ silentLogger) Critical(_ ...interface{})              {}
+func (_ silentLogger) Criticalf(_ string, _ ...interface{})   {}
+func (_ silentLogger) Error(_ ...interface{})                 {}
+func (_ silentLogger) Errorf(_ string, _ ...interface{})      {}
+func (_ silentLogger) Warning(_ ...interface{})               {}
+func (_ silentLogger) Warningf(_ string, _ ...interface{})    {}
+func (_ silentLogger) Notice(_ ...interface{})                {}
+func (_ silentLogger) Noticef(_ string, _ ...interface{})     {}
+func (_ silentLogger) Info(_ ...interface{})                  {}
+func (_ silentLogger) Infof(_ string, _ ...interface{})       {}
+func (_ silentLogger) Debug(_ ...interface{})                 {}
+func (_ silentLogger) Debugf(format string, _ ...interface{}) {}
+
+// BasicLogger is the equivalent of using log from the standard
+// library to print to STDERR.
+var BasicLogger Logger
+
+type basicLogger struct {
+	log *log.Logger //func New(out io.Writer, prefix string, flag int) *Logger
+}
+
+func init() {
+	BasicLogger = &basicLogger{log: log.New(os.Stderr, log.Prefix(), log.LstdFlags)}
+}
+
+func (l basicLogger) Fatal(items ...interface{})               { l.log.Fatal(items...) }
+func (l basicLogger) Fatalf(s string, items ...interface{})    { l.log.Fatalf(s, items...) }
+func (l basicLogger) Panic(items ...interface{})               { l.log.Panic(items...) }
+func (l basicLogger) Panicf(s string, items ...interface{})    { l.log.Panicf(s, items...) }
+func (l basicLogger) Critical(items ...interface{})            { l.log.Print(items...) }
+func (l basicLogger) Criticalf(s string, items ...interface{}) { l.log.Printf(s, items...) }
+func (l basicLogger) Error(items ...interface{})               { l.log.Print(items...) }
+func (l basicLogger) Errorf(s string, items ...interface{})    { l.log.Printf(s, items...) }
+func (l basicLogger) Warning(items ...interface{})             { l.log.Print(items...) }
+func (l basicLogger) Warningf(s string, items ...interface{})  { l.log.Printf(s, items...) }
+func (l basicLogger) Notice(items ...interface{})              { l.log.Print(items...) }
+func (l basicLogger) Noticef(s string, items ...interface{})   { l.log.Printf(s, items...) }
+func (l basicLogger) Info(items ...interface{})                { l.log.Print(items...) }
+func (l basicLogger) Infof(s string, items ...interface{})     { l.log.Printf(s, items...) }
+func (l basicLogger) Debug(items ...interface{})               { l.log.Print(items...) }
+func (l basicLogger) Debugf(s string, items ...interface{})    { l.log.Printf(s, items...) }

+ 89 - 0
vendor/github.com/ChimeraCoder/anaconda/media.go

@@ -0,0 +1,89 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
+type Media struct {
+	MediaID       int64  `json:"media_id"`
+	MediaIDString string `json:"media_id_string"`
+	Size          int    `json:"size"`
+	Image         Image  `json:"image"`
+}
+
+type Image struct {
+	W         int    `json:"w"`
+	H         int    `json:"h"`
+	ImageType string `json:"image_type"`
+}
+
+type ChunkedMedia struct {
+	MediaID          int64  `json:"media_id"`
+	MediaIDString    string `json:"media_id_string"`
+	ExpiresAfterSecs int    `json:"expires_after_secs"`
+}
+
+type Video struct {
+	VideoType string `json:"video_type"`
+}
+
+type VideoMedia struct {
+	MediaID          int64  `json:"media_id"`
+	MediaIDString    string `json:"media_id_string"`
+	Size             int    `json:"size"`
+	ExpiresAfterSecs int    `json:"expires_after_secs"`
+	Video            Video  `json:"video"`
+}
+
+func (a TwitterApi) UploadMedia(base64String string) (media Media, err error) {
+	v := url.Values{}
+	v.Set("media_data", base64String)
+
+	var mediaResponse Media
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch}
+	return mediaResponse, (<-response_ch).err
+}
+
+func (a TwitterApi) UploadVideoInit(totalBytes int, mimeType string) (chunkedMedia ChunkedMedia, err error) {
+	v := url.Values{}
+	v.Set("command", "INIT")
+	v.Set("media_type", mimeType)
+	v.Set("total_bytes", strconv.FormatInt(int64(totalBytes), 10))
+
+	var mediaResponse ChunkedMedia
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch}
+	return mediaResponse, (<-response_ch).err
+}
+
+func (a TwitterApi) UploadVideoAppend(mediaIdString string,
+	segmentIndex int, base64String string) error {
+
+	v := url.Values{}
+	v.Set("command", "APPEND")
+	v.Set("media_id", mediaIdString)
+	v.Set("media_data", base64String)
+	v.Set("segment_index", strconv.FormatInt(int64(segmentIndex), 10))
+
+	var emptyResponse interface{}
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &emptyResponse, _POST, response_ch}
+	return (<-response_ch).err
+}
+
+func (a TwitterApi) UploadVideoFinalize(mediaIdString string) (videoMedia VideoMedia, err error) {
+	v := url.Values{}
+	v.Set("command", "FINALIZE")
+	v.Set("media_id", mediaIdString)
+
+	var mediaResponse VideoMedia
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{UploadBaseUrl + "/media/upload.json", v, &mediaResponse, _POST, response_ch}
+	return mediaResponse, (<-response_ch).err
+}

+ 54 - 0
vendor/github.com/ChimeraCoder/anaconda/mutes.go

@@ -0,0 +1,54 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
+func (a TwitterApi) GetMutedUsersList(v url.Values) (c UserCursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/mutes/users/list.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) GetMutedUsersIds(v url.Values) (c Cursor, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/mutes/users/ids.json", v, &c, _GET, response_ch}
+	return c, (<-response_ch).err
+}
+
+func (a TwitterApi) MuteUser(screenName string, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("screen_name", screenName)
+	return a.Mute(v)
+}
+
+func (a TwitterApi) MuteUserId(id int64, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	return a.Mute(v)
+}
+
+func (a TwitterApi) Mute(v url.Values) (user User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/mutes/users/create.json", v, &user, _POST, response_ch}
+	return user, (<-response_ch).err
+}
+
+func (a TwitterApi) UnmuteUser(screenName string, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("screen_name", screenName)
+	return a.Unmute(v)
+}
+
+func (a TwitterApi) UnmuteUserId(id int64, v url.Values) (user User, err error) {
+	v = cleanValues(v)
+	v.Set("user_id", strconv.FormatInt(id, 10))
+	return a.Unmute(v)
+}
+
+func (a TwitterApi) Unmute(v url.Values) (user User, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/mutes/users/destroy.json", v, &user, _POST, response_ch}
+	return user, (<-response_ch).err
+}

+ 59 - 0
vendor/github.com/ChimeraCoder/anaconda/oembed.go

@@ -0,0 +1,59 @@
+package anaconda
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+type OEmbed struct {
+	Type          string
+	Width         int
+	Cache_age     string
+	Height        int
+	Author_url    string
+	Html          string
+	Version       string
+	Provider_name string
+	Provider_url  string
+	Url           string
+	Author_name   string
+}
+
+// No authorization on this endpoint. Its the only one.
+func (a TwitterApi) GetOEmbed(v url.Values) (o OEmbed, err error) {
+	resp, err := http.Get(a.baseUrlV1() + "/statuses/oembed.json?" + v.Encode())
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+
+	err = decodeResponse(resp, &o)
+	return
+}
+
+// Calls GetOEmbed with the corresponding id. Convenience wrapper for GetOEmbed()
+func (a TwitterApi) GetOEmbedId(id int64, v url.Values) (o OEmbed, err error) {
+	v = cleanValues(v)
+	v.Set("id", strconv.FormatInt(id, 10))
+	resp, err := http.Get(a.baseUrlV1() + "/statuses/oembed.json?" + v.Encode())
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+
+	err = decodeResponse(resp, &o)
+	return
+}
+
+func (a TwitterApi) baseUrlV1() string {
+	if a.baseUrl == BaseUrl {
+		return BaseUrlV1
+	}
+
+	if a.baseUrl == "" {
+		return BaseUrlV1
+	}
+
+	return a.baseUrl
+}

+ 35 - 0
vendor/github.com/ChimeraCoder/anaconda/place.go

@@ -0,0 +1,35 @@
+package anaconda
+
+type Place struct {
+	Attributes  map[string]string `json:"attributes"`
+	BoundingBox struct {
+		Coordinates [][][]float64 `json:"coordinates"`
+		Type        string        `json:"type"`
+	} `json:"bounding_box"`
+	ContainedWithin []struct {
+		Attributes  map[string]string `json:"attributes"`
+		BoundingBox struct {
+			Coordinates [][][]float64 `json:"coordinates"`
+			Type        string        `json:"type"`
+		} `json:"bounding_box"`
+		Country     string `json:"country"`
+		CountryCode string `json:"country_code"`
+		FullName    string `json:"full_name"`
+		ID          string `json:"id"`
+		Name        string `json:"name"`
+		PlaceType   string `json:"place_type"`
+		URL         string `json:"url"`
+	} `json:"contained_within"`
+	Country     string `json:"country"`
+	CountryCode string `json:"country_code"`
+	FullName    string `json:"full_name"`
+	Geometry    struct {
+		Coordinates [][][]float64 `json:"coordinates"`
+		Type        string        `json:"type"`
+	} `json:"geometry"`
+	ID        string   `json:"id"`
+	Name      string   `json:"name"`
+	PlaceType string   `json:"place_type"`
+	Polylines []string `json:"polylines"`
+	URL       string   `json:"url"`
+}

+ 30 - 0
vendor/github.com/ChimeraCoder/anaconda/rate_limit_status.go

@@ -0,0 +1,30 @@
+package anaconda
+
+import (
+	"net/url"
+	"strings"
+)
+
+type RateLimitStatusResponse struct {
+	RateLimitContext RateLimitContext                   `json:"rate_limit_context"`
+	Resources        map[string]map[string]BaseResource `json:"resources"`
+}
+
+type RateLimitContext struct {
+	AccessToken string `json:"access_token"`
+}
+
+type BaseResource struct {
+	Limit     int `json:"limit"`
+	Remaining int `json:"remaining"`
+	Reset     int `json:"reset"`
+}
+
+func (a TwitterApi) GetRateLimits(r []string) (rateLimitStatusResponse RateLimitStatusResponse, err error) {
+	resources := strings.Join(r, ",")
+	v := url.Values{}
+	v.Set("resources", resources)
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/application/rate_limit_status.json", v, &rateLimitStatusResponse, _GET, response_ch}
+	return rateLimitStatusResponse, (<-response_ch).err
+}

+ 40 - 0
vendor/github.com/ChimeraCoder/anaconda/relationship.go

@@ -0,0 +1,40 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
// RelationshipResponse wraps the single relationship object returned by
// GET friendships/show.
type RelationshipResponse struct {
	Relationship Relationship `json:"relationship"`
}

// Relationship describes the friendship between a source and a target user.
type Relationship struct {
	Target Target `json:"target"`
	Source Source `json:"source"`
}

// Target is the relationship as seen from the target user's side.
type Target struct {
	Id          int64  `json:"id"`
	Id_str      string `json:"id_str"`
	Screen_name string `json:"screen_name"`
	Following   bool   `json:"following"`
	Followed_by bool   `json:"followed_by"`
}

// Source is the relationship as seen from the source user's side.
// Fields carry no json tags, so decoding relies on Go's case-insensitive
// field-name matching against the API's snake_case keys.
type Source struct {
	Id                    int64
	Id_str                string
	Screen_name           string
	Following             bool
	Followed_by           bool
	Can_dm                bool
	Blocking              bool
	Muting                bool
	Marked_spam           bool
	All_replies           bool
	Want_retweets         bool
	Notifications_enabled bool
}
+
+func (a TwitterApi) GetFriendshipsShow(v url.Values) (relationshipResponse RelationshipResponse, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/friendships/show.json", v, &relationshipResponse, _GET, response_ch}
+	return relationshipResponse, (<-response_ch).err
+}

+ 57 - 0
vendor/github.com/ChimeraCoder/anaconda/search.go

@@ -0,0 +1,57 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
// SearchMetadata is the paging/bookkeeping section of a search response.
// NextResults holds a ready-made query string for the next page and is
// empty on the last page.
type SearchMetadata struct {
	CompletedIn   float32 `json:"completed_in"`
	MaxId         int64   `json:"max_id"`
	MaxIdString   string  `json:"max_id_str"`
	Query         string  `json:"query"`
	RefreshUrl    string  `json:"refresh_url"`
	Count         int     `json:"count"`
	SinceId       int64   `json:"since_id"`
	SinceIdString string  `json:"since_id_str"`
	NextResults   string  `json:"next_results"`
}

// SearchResponse is the payload of GET search/tweets: the matching
// statuses plus paging metadata.
type SearchResponse struct {
	Statuses []Tweet        `json:"statuses"`
	Metadata SearchMetadata `json:"search_metadata"`
}
+
+func (sr *SearchResponse) GetNext(a *TwitterApi) (SearchResponse, error) {
+	if sr.Metadata.NextResults == "" {
+		return SearchResponse{}, nil
+	}
+	nextUrl, err := url.Parse(sr.Metadata.NextResults)
+	if err != nil {
+		return SearchResponse{}, err
+	}
+
+	v := nextUrl.Query()
+	// remove the q parameter from the url.Values so that it
+	// can be added back via the next GetSearch method call.
+	delete(v, "q")
+
+	q, _ := url.QueryUnescape(sr.Metadata.Query)
+	if err != nil {
+		return SearchResponse{}, err
+	}
+	newSr, err := a.GetSearch(q, v)
+	return newSr, err
+}
+
+func (a TwitterApi) GetSearch(queryString string, v url.Values) (sr SearchResponse, err error) {
+	v = cleanValues(v)
+	v.Set("q", queryString)
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/search/tweets.json", v, &sr, _GET, response_ch}
+
+	// We have to read from the response channel before assigning to timeline
+	// Otherwise this will happen before the responses have been written
+	resp := <-response_ch
+	err = resp.err
+	return sr, err
+}

+ 318 - 0
vendor/github.com/ChimeraCoder/anaconda/streaming.go

@@ -0,0 +1,318 @@
+package anaconda
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"github.com/dustin/go-jsonpointer"
+)
+
// Streaming API hosts. User streams, site streams and the public stream
// endpoints each live on their own subdomain.
const (
	BaseUrlUserStream = "https://userstream.twitter.com/1.1"
	BaseUrlSiteStream = "https://sitestream.twitter.com/1.1"
	BaseUrlStream     = "https://stream.twitter.com/1.1"
)
+
// Streaming message types. Each exported type is a payload delivered on
// Stream.C; the matching unexported lowercase type is the JSON envelope
// Twitter actually sends, used only while decoding in jsonToKnownType.

// StatusDeletionNotice reports that a status (tweet) was deleted.
type StatusDeletionNotice struct {
	Id        int64  `json:"id"`
	IdStr     string `json:"id_str"`
	UserId    int64  `json:"user_id"`
	UserIdStr string `json:"user_id_str"`
}

// envelope: {"delete": {"status": {...}}}
type statusDeletionNotice struct {
	Delete *struct {
		Status *StatusDeletionNotice `json:"status"`
	} `json:"delete"`
}

// DirectMessageDeletionNotice reports that a direct message was deleted.
type DirectMessageDeletionNotice struct {
	Id        int64  `json:"id"`
	IdStr     string `json:"id_str"`
	UserId    int64  `json:"user_id"`
	UserIdStr string `json:"user_id_str"`
}

// envelope: {"delete": {"direct_message": {...}}}
type directMessageDeletionNotice struct {
	Delete *struct {
		DirectMessage *DirectMessageDeletionNotice `json:"direct_message"`
	} `json:"delete"`
}

// LocationDeletionNotice asks clients to scrub geo data from a user's
// statuses up to the given status id.
type LocationDeletionNotice struct {
	UserId          int64  `json:"user_id"`
	UserIdStr       string `json:"user_id_str"`
	UpToStatusId    int64  `json:"up_to_status_id"`
	UpToStatusIdStr string `json:"up_to_status_id_str"`
}

// envelope: {"scrub_geo": {...}}
type locationDeletionNotice struct {
	ScrubGeo *LocationDeletionNotice `json:"scrub_geo"`
}

// LimitNotice reports how many matching statuses were withheld because
// the stream hit its delivery limit.
type LimitNotice struct {
	Track int64 `json:"track"`
}

// envelope: {"limit": {...}}
type limitNotice struct {
	Limit *LimitNotice `json:"limit"`
}

// StatusWithheldNotice reports a status withheld in certain countries.
type StatusWithheldNotice struct {
	Id                  int64    `json:"id"`
	UserId              int64    `json:"user_id"`
	WithheldInCountries []string `json:"withheld_in_countries"`
}

// envelope: {"status_withheld": {...}}
type statusWithheldNotice struct {
	StatusWithheld *StatusWithheldNotice `json:"status_withheld"`
}

// UserWithheldNotice reports a user withheld in certain countries.
type UserWithheldNotice struct {
	Id                  int64    `json:"id"`
	WithheldInCountries []string `json:"withheld_in_countries"`
}

// envelope: {"user_withheld": {...}}
type userWithheldNotice struct {
	UserWithheld *UserWithheldNotice `json:"user_withheld"`
}

// DisconnectMessage is sent by Twitter just before it closes the stream.
type DisconnectMessage struct {
	Code       int64  `json:"code"`
	StreamName string `json:"stream_name"`
	Reason     string `json:"reason"`
}

// envelope: {"disconnect": {...}}
type disconnectMessage struct {
	Disconnect *DisconnectMessage `json:"disconnect"`
}

// StallWarning warns that the client is falling behind; PercentFull is
// how full the server-side queue is.
type StallWarning struct {
	Code        string `json:"code"`
	Message     string `json:"message"`
	PercentFull int64  `json:"percent_full"`
}

// envelope: {"warning": {...}}
type stallWarning struct {
	Warning *StallWarning `json:"warning"`
}

// FriendsList is the preamble of a user stream: ids of the user's friends.
type FriendsList []int64

// envelope: {"friends": [...]}
type friendsList struct {
	Friends *FriendsList `json:"friends"`
}

// envelope: {"direct_message": {...}}
type streamDirectMessage struct {
	DirectMessage *DirectMessage `json:"direct_message"`
}

// Event is the generic user-stream event (favorite, follow, etc.).
type Event struct {
	Target    *User  `json:"target"`
	Source    *User  `json:"source"`
	Event     string `json:"event"`
	CreatedAt string `json:"created_at"`
}

// EventList is an Event whose target object is a List.
type EventList struct {
	Event
	TargetObject *List `json:"target_object"`
}

// EventTweet is an Event whose target object is a Tweet.
type EventTweet struct {
	Event
	TargetObject *Tweet `json:"target_object"`
}

// EventFollow is an Event with no target object (follow/unfollow).
type EventFollow struct {
	Event
}

// TooManyFollow warns that the account hit a follow-related limit.
type TooManyFollow struct {
	Warning *struct {
		Code    string `json:"code"`
		Message string `json:"message"`
		UserId  int64  `json:"user_id"`
	} `json:"warning"`
}
+
// TODO: Site Stream messages. I cant test.

// Stream allows you to stream using one of the
// PublicStream* or UserStream api methods.
//
// A goroutine is started that delivers decoded messages as interface{}
// values on the stream's channel C.
// Cast a received value to the concrete message type, e.g.:
//    t, ok := o.(twitter.Tweet) // try casting into a tweet
//    if !ok {
//      log.Debug("Received non tweet message")
//    }
//
// If the stream cannot be opened at all, C is closed.
// Otherwise the loop connects and delivers messages on C, reconnecting
// after an exponential backoff if the connection is lost.
// If Twitter responds with 420, 429 or 503 (meaning "wait a sec"),
// the loop retries the socket with a simple auto-growing backoff.
//
// When finished streaming call stream.Stop() to initiate termination.
//

type Stream struct {
	api TwitterApi
	C   chan interface{} // decoded messages; closed when the loop exits
	// run gates both the connect loop and the read loop.
	// NOTE(review): it is written by Stop and read by the stream
	// goroutine without synchronization — looks like a data race; confirm.
	run bool
}
+
+func (s *Stream) listen(response http.Response) {
+	if response.Body != nil {
+		defer response.Body.Close()
+	}
+
+	s.api.Log.Notice("Listening to twitter socket")
+	defer s.api.Log.Notice("twitter socket closed, leaving loop")
+
+	scanner := bufio.NewScanner(response.Body)
+
+	for scanner.Scan() && s.run {
+		j := scanner.Bytes()
+		if len(j) == 0 {
+			s.api.Log.Debug("Empty bytes... Moving along")
+		} else {
+			s.C <- jsonToKnownType(j)
+		}
+	}
+}
+
// jsonToKnownType decodes a raw streaming message into the most specific
// known type, identified by probing for a characteristic JSON-pointer path
// in each candidate envelope. The probe order matters: more specific paths
// (e.g. /delete/status) must be tried before broader ones (e.g. /event).
// Returns nil when no known shape matches.
func jsonToKnownType(j []byte) interface{} {
	// TODO: DRY
	if o := new(Tweet); jsonAsStruct(j, "/source", &o) {
		// Plain tweets are recognized by their "source" field.
		return *o
	} else if o := new(statusDeletionNotice); jsonAsStruct(j, "/delete/status", &o) {
		return *o.Delete.Status
	} else if o := new(directMessageDeletionNotice); jsonAsStruct(j, "/delete/direct_message", &o) {
		return *o.Delete.DirectMessage
	} else if o := new(locationDeletionNotice); jsonAsStruct(j, "/scrub_geo", &o) {
		return *o.ScrubGeo
	} else if o := new(limitNotice); jsonAsStruct(j, "/limit", &o) {
		return *o.Limit
	} else if o := new(statusWithheldNotice); jsonAsStruct(j, "/status_withheld", &o) {
		return *o.StatusWithheld
	} else if o := new(userWithheldNotice); jsonAsStruct(j, "/user_withheld", &o) {
		return *o.UserWithheld
	} else if o := new(disconnectMessage); jsonAsStruct(j, "/disconnect", &o) {
		return *o.Disconnect
	} else if o := new(stallWarning); jsonAsStruct(j, "/warning", &o) {
		return *o.Warning
	} else if o := new(friendsList); jsonAsStruct(j, "/friends", &o) {
		return *o.Friends
	} else if o := new(streamDirectMessage); jsonAsStruct(j, "/direct_message", &o) {
		return *o.DirectMessage
	} else if o := new(EventTweet); jsonAsStruct(j, "/target_object/source", &o) {
		// Event whose target object looks like a tweet.
		return *o
	} else if o := new(EventList); jsonAsStruct(j, "/target_object/slug", &o) {
		// Event whose target object looks like a list.
		return *o
	} else if o := new(Event); jsonAsStruct(j, "/target_object", &o) {
		return *o
	} else if o := new(EventFollow); jsonAsStruct(j, "/event", &o) {
		// Event with no target object at all (e.g. follow).
		return *o
	} else {
		return nil
	}
}
+
+func (s *Stream) requestStream(urlStr string, v url.Values, method int) (resp *http.Response, err error) {
+	switch method {
+	case _GET:
+		return s.api.oauthClient.Get(s.api.HttpClient, s.api.Credentials, urlStr, v)
+	case _POST:
+		return s.api.oauthClient.Post(s.api.HttpClient, s.api.Credentials, urlStr, v)
+	default:
+	}
+	return nil, fmt.Errorf("HTTP method not yet supported")
+}
+
+func (s *Stream) loop(urlStr string, v url.Values, method int) {
+	defer s.api.Log.Debug("Leaving request stream loop")
+	defer close(s.C)
+
+	rlb := NewHTTP420ErrBackoff()
+	for s.run {
+		resp, err := s.requestStream(urlStr, v, method)
+		if err != nil {
+			if err == io.EOF {
+				// Sometimes twitter closes the stream
+				// right away with EOF as of a rate limit
+				resp.StatusCode = 420
+			} else {
+				s.api.Log.Criticalf("Cannot request stream : %s", err)
+				return
+			}
+		}
+		s.api.Log.Debugf("Response status=%s code=%d", resp.Status, resp.StatusCode)
+
+		switch resp.StatusCode {
+		case 200, 304:
+			s.listen(*resp)
+			rlb.Reset()
+		case 420, 429, 503:
+			s.api.Log.Noticef("Twitter streaming: backing off as got : %+s", resp.Status)
+			rlb.BackOff()
+		case 400, 401, 403, 404, 406, 410, 422, 500, 502, 504:
+			s.api.Log.Criticalf("Twitter streaming: leaving after an irremediable error: %+s", resp.Status)
+			return
+		default:
+			s.api.Log.Notice("Received unknown status: %+s", resp.StatusCode)
+		}
+
+	}
+}
+
// Stop asks the stream to terminate: the read and connect loops exit on
// their next run check and C is then closed.
// NOTE(review): run is written here and read in the stream goroutine with
// no synchronization — looks racy; confirm whether this matters to callers.
func (s *Stream) Stop() {
	s.run = false
}

// start launches the connect/read loop in its own goroutine.
func (s *Stream) start(urlStr string, v url.Values, method int) {
	s.run = true
	go s.loop(urlStr, v, method)
}
+
+func (a TwitterApi) newStream(urlStr string, v url.Values, method int) *Stream {
+	stream := Stream{
+		api: a,
+		C:   make(chan interface{}),
+	}
+
+	stream.start(urlStr, v, method)
+	return &stream
+}
+
// UserStream opens the authenticated user's stream (user.json).
func (a TwitterApi) UserStream(v url.Values) (stream *Stream) {
	return a.newStream(BaseUrlUserStream+"/user.json", v, _GET)
}

// PublicStreamSample opens the random-sample public stream.
func (a TwitterApi) PublicStreamSample(v url.Values) (stream *Stream) {
	return a.newStream(BaseUrlStream+"/statuses/sample.json", v, _GET)
}

// PublicStreamFirehose opens the full firehose stream.
// XXX: this endpoint requires special access that the author did not
// have, so it is untested.
func (a TwitterApi) PublicStreamFirehose(v url.Values) (stream *Stream) {
	return a.newStream(BaseUrlStream+"/statuses/firehose.json", v, _GET)
}

// PublicStreamFilter opens a filtered public stream; pass track/follow/
// locations predicates in v.
// XXX: are dedicated PublicStream(Track|Follow|Locations) funcs needed?
func (a TwitterApi) PublicStreamFilter(v url.Values) (stream *Stream) {
	return a.newStream(BaseUrlStream+"/statuses/filter.json", v, _POST)
}

// SiteStream opens a site stream.
// XXX: this endpoint requires special access that the author did not
// have, so it is untested.
func (a TwitterApi) SiteStream(v url.Values) (stream *Stream) {
	return a.newStream(BaseUrlSiteStream+"/site.json", v, _GET)
}
+
// jsonAsStruct reports whether the JSON-pointer path exists in j and, if
// so, unmarshals the whole document into obj. The jsonpointer lookup error
// is deliberately ignored: a failed lookup is treated like an absent path.
func jsonAsStruct(j []byte, path string, obj interface{}) (res bool) {
	if v, _ := jsonpointer.Find(j, path); v == nil {
		return false
	}
	err := json.Unmarshal(j, obj)
	return err == nil
}

+ 45 - 0
vendor/github.com/ChimeraCoder/anaconda/timeline.go

@@ -0,0 +1,45 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
+// GetHomeTimeline returns the most recent tweets and retweets posted by the user
+// and the users that they follow.
+// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-home_timeline
+// By default, include_entities is set to "true"
+func (a TwitterApi) GetHomeTimeline(v url.Values) (timeline []Tweet, err error) {
+	v = cleanValues(v)
+	if val := v.Get("include_entities"); val == "" {
+		v.Set("include_entities", "true")
+	}
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/home_timeline.json", v, &timeline, _GET, response_ch}
+	return timeline, (<-response_ch).err
+}
+
+// GetUserTimeline returns a collection of the most recent Tweets posted by the user indicated by the screen_name or user_id parameters.
+// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-user_timeline
+func (a TwitterApi) GetUserTimeline(v url.Values) (timeline []Tweet, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/user_timeline.json", v, &timeline, _GET, response_ch}
+	return timeline, (<-response_ch).err
+}
+
+// GetMentionsTimeline returns the most recent mentions (Tweets containing a users’s @screen_name) for the authenticating user.
+// The timeline returned is the equivalent of the one seen when you view your mentions on twitter.com.
+// https://developer.twitter.com/en/docs/tweets/timelines/api-reference/get-statuses-mentions_timeline
+func (a TwitterApi) GetMentionsTimeline(v url.Values) (timeline []Tweet, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/mentions_timeline.json", v, &timeline, _GET, response_ch}
+	return timeline, (<-response_ch).err
+}
+
+// GetRetweetsOfMe returns the most recent Tweets authored by the authenticating user that have been retweeted by others.
+// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-retweets_of_me
+func (a TwitterApi) GetRetweetsOfMe(v url.Values) (tweets []Tweet, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/retweets_of_me.json", v, &tweets, _GET, response_ch}
+	return tweets, (<-response_ch).err
+}

+ 64 - 0
vendor/github.com/ChimeraCoder/anaconda/trends.go

@@ -0,0 +1,64 @@
+package anaconda
+
+import (
+	"net/url"
+	"strconv"
+)
+
// Location names a place by its Yahoo! Where On Earth ID (WOEID).
type Location struct {
	Name  string `json:"name"`
	Woeid int    `json:"woeid"`
}

// Trend is a single trending topic.
type Trend struct {
	Name            string `json:"name"`
	Query           string `json:"query"`
	Url             string `json:"url"`
	PromotedContent string `json:"promoted_content"`
}

// TrendResponse is one element of the array returned by trends/place.json.
type TrendResponse struct {
	Trends    []Trend    `json:"trends"`
	AsOf      string     `json:"as_of"`
	CreatedAt string     `json:"created_at"`
	Locations []Location `json:"locations"`
}

// TrendLocation describes a place that has trending-topic data available.
type TrendLocation struct {
	Country     string `json:"country"`
	CountryCode string `json:"countryCode"`
	Name        string `json:"name"`
	ParentId    int    `json:"parentid"`
	PlaceType   struct {
		Code int    `json:"code"`
		Name string `json:"name"`
	} `json:"placeType"`
	Url   string `json:"url"`
	Woeid int32  `json:"woeid"`
}
+
// GetTrendsByPlace returns the top trends for the location identified by
// the WOEID id.
// https://developer.twitter.com/en/docs/trends/trends-for-location/api-reference/get-trends-place
func (a TwitterApi) GetTrendsByPlace(id int64, v url.Values) (trendResp TrendResponse, err error) {
	response_ch := make(chan response)
	v = cleanValues(v)
	v.Set("id", strconv.FormatInt(id, 10))
	// The endpoint returns a one-element JSON array, so decode into a
	// slice wrapping &trendResp to unwrap the single object.
	a.queryQueue <- query{a.baseUrl + "/trends/place.json", v, &[]interface{}{&trendResp}, _GET, response_ch}
	return trendResp, (<-response_ch).err
}
+
+// https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-available
+func (a TwitterApi) GetTrendsAvailableLocations(v url.Values) (locations []TrendLocation, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/trends/available.json", v, &locations, _GET, response_ch}
+	return locations, (<-response_ch).err
+}
+
+// https://developer.twitter.com/en/docs/trends/locations-with-trending-topics/api-reference/get-trends-closest
+func (a TwitterApi) GetTrendsClosestLocations(lat float64, long float64, v url.Values) (locations []TrendLocation, err error) {
+	response_ch := make(chan response)
+	v = cleanValues(v)
+	v.Set("lat", strconv.FormatFloat(lat, 'f', 6, 64))
+	v.Set("long", strconv.FormatFloat(long, 'f', 6, 64))
+	a.queryQueue <- query{a.baseUrl + "/trends/closest.json", v, &locations, _GET, response_ch}
+	return locations, (<-response_ch).err
+}

+ 154 - 0
vendor/github.com/ChimeraCoder/anaconda/tweet.go

@@ -0,0 +1,154 @@
+package anaconda
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
// Tweet is a single status as returned by the v1.1 REST and streaming
// APIs. Extended (280-char) payloads arrive either in FullText or nested
// in ExtendedTweet; UnmarshalJSON normalizes them (see extractExtendedTweet).
type Tweet struct {
	Contributors                []int64                `json:"contributors"`
	Coordinates                 *Coordinates           `json:"coordinates"`
	CreatedAt                   string                 `json:"created_at"` // parse with CreatedAtTime
	DisplayTextRange            []int                  `json:"display_text_range"`
	Entities                    Entities               `json:"entities"`
	ExtendedEntities            Entities               `json:"extended_entities"`
	ExtendedTweet               ExtendedTweet          `json:"extended_tweet"`
	FavoriteCount               int                    `json:"favorite_count"`
	Favorited                   bool                   `json:"favorited"`
	FilterLevel                 string                 `json:"filter_level"`
	FullText                    string                 `json:"full_text"`
	HasExtendedProfile          bool                   `json:"has_extended_profile"`
	Id                          int64                  `json:"id"`
	IdStr                       string                 `json:"id_str"`
	InReplyToScreenName         string                 `json:"in_reply_to_screen_name"`
	InReplyToStatusID           int64                  `json:"in_reply_to_status_id"`
	InReplyToStatusIdStr        string                 `json:"in_reply_to_status_id_str"`
	InReplyToUserID             int64                  `json:"in_reply_to_user_id"`
	InReplyToUserIdStr          string                 `json:"in_reply_to_user_id_str"`
	IsTranslationEnabled        bool                   `json:"is_translation_enabled"`
	Lang                        string                 `json:"lang"`
	Place                       Place                  `json:"place"`
	QuotedStatusID              int64                  `json:"quoted_status_id"`
	QuotedStatusIdStr           string                 `json:"quoted_status_id_str"`
	QuotedStatus                *Tweet                 `json:"quoted_status"`
	PossiblySensitive           bool                   `json:"possibly_sensitive"`
	PossiblySensitiveAppealable bool                   `json:"possibly_sensitive_appealable"`
	RetweetCount                int                    `json:"retweet_count"`
	Retweeted                   bool                   `json:"retweeted"`
	RetweetedStatus             *Tweet                 `json:"retweeted_status"`
	Source                      string                 `json:"source"`
	Scopes                      map[string]interface{} `json:"scopes"`
	Text                        string                 `json:"text"`
	User                        User                   `json:"user"`
	WithheldCopyright           bool                   `json:"withheld_copyright"`
	WithheldInCountries         []string               `json:"withheld_in_countries"`
	WithheldScope               string                 `json:"withheld_scope"`

	//Geo is deprecated
	//Geo                  interface{} `json:"geo"`
}

// CreatedAtTime is a convenience wrapper that returns the Created_at time,
// parsed as a time.Time struct (the API uses Ruby-date formatting).
func (t Tweet) CreatedAtTime() (time.Time, error) {
	return time.Parse(time.RubyDate, t.CreatedAt)
}
+
// It may be worth placing these in an additional source file(s).

// Contributor identifies an account that contributed to a tweet. User has
// the same fields, but only these three are possible in a contributor.
type Contributor struct {
	Id         int64  `json:"id"`
	IdStr      string `json:"id_str"`
	ScreenName string `json:"screen_name"`
}

// Coordinates is a GeoJSON-style point attached to a tweet.
type Coordinates struct {
	Coordinates [2]float64 `json:"coordinates"` // Coordinate always has to have exactly 2 values
	Type        string     `json:"type"`
}

// ExtendedTweet carries the full 280-character payload on streaming
// messages; UnmarshalJSON folds it back into the top-level Tweet fields.
type ExtendedTweet struct {
	FullText         string   `json:"full_text"`
	DisplayTextRange []int    `json:"display_text_range"`
	Entities         Entities `json:"entities"`
	ExtendedEntities Entities `json:"extended_entities"`
}
+
+// HasCoordinates is a helper function to easily determine if a Tweet has coordinates associated with it
+func (t Tweet) HasCoordinates() bool {
+	if t.Coordinates != nil {
+		if t.Coordinates.Type == "Point" {
+			return true
+		}
+	}
+	return false
+}
+
+// The following provide convenience and eliviate confusion about the order of coordinates in the Tweet
+
+// Latitude is a convenience wrapper that returns the latitude easily
+func (t Tweet) Latitude() (float64, error) {
+	if t.HasCoordinates() {
+		return t.Coordinates.Coordinates[1], nil
+	}
+	return 0, fmt.Errorf("No Coordinates in this Tweet")
+}
+
+// Longitude is a convenience wrapper that returns the longitude easily
+func (t Tweet) Longitude() (float64, error) {
+	if t.HasCoordinates() {
+		return t.Coordinates.Coordinates[0], nil
+	}
+	return 0, fmt.Errorf("No Coordinates in this Tweet")
+}
+
+// X is a convenience wrapper which returns the X (Longitude) coordinate easily
+func (t Tweet) X() (float64, error) {
+	return t.Longitude()
+}
+
+// Y is a convenience wrapper which return the Y (Lattitude) corrdinate easily
+func (t Tweet) Y() (float64, error) {
+	return t.Latitude()
+}
+
// extractExtendedTweet normalizes the three places the API may put tweet
// text (Text, FullText, ExtendedTweet.FullText) so that both Text and
// FullText are always populated after decoding. The steps are
// order-sensitive: FullText is backfilled first, then ExtendedTweet wins,
// then Text is derived last.
func (t *Tweet) extractExtendedTweet() {
	// if the truncated Text is set, the API does not return an extended
	// tweet; we need to manually set FullText in this case
	if len(t.Text) > 0 && len(t.FullText) == 0 {
		t.FullText = t.Text
	}

	// An extended_tweet payload supersedes the top-level fields.
	if len(t.ExtendedTweet.FullText) > 0 {
		t.DisplayTextRange = t.ExtendedTweet.DisplayTextRange
		t.Entities = t.ExtendedTweet.Entities
		t.ExtendedEntities = t.ExtendedTweet.ExtendedEntities
		t.FullText = t.ExtendedTweet.FullText
	}

	// if the API supplied us with information how to extract the shortened
	// text, extract it
	// NOTE(review): this slices FullText by byte offsets; the API's range
	// presumably counts characters — confirm behavior on non-ASCII text.
	if len(t.Text) == 0 && len(t.DisplayTextRange) == 2 {
		t.Text = t.FullText[t.DisplayTextRange[0]:t.DisplayTextRange[1]]
	}
	// if the truncated text is still empty then full & truncated text are equal
	if len(t.Text) == 0 {
		t.Text = t.FullText
	}
}
+
// UnmarshalJSON decodes a tweet and then normalizes its text fields via
// extractExtendedTweet. The local Alias type strips Tweet's methods so the
// inner json.Unmarshal does not recurse back into this method.
func (t *Tweet) UnmarshalJSON(data []byte) error {
	type Alias Tweet
	aux := &struct {
		*Alias
	}{
		Alias: (*Alias)(t),
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	t.extractExtendedTweet()
	return nil
}

+ 107 - 0
vendor/github.com/ChimeraCoder/anaconda/tweets.go

@@ -0,0 +1,107 @@
+package anaconda
+
+import (
+	"fmt"
+	"net/url"
+	"strconv"
+)
+
+func (a TwitterApi) GetTweet(id int64, v url.Values) (tweet Tweet, err error) {
+	v = cleanValues(v)
+	v.Set("id", strconv.FormatInt(id, 10))
+
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/show.json", v, &tweet, _GET, response_ch}
+	return tweet, (<-response_ch).err
+}
+
+func (a TwitterApi) GetTweetsLookupByIds(ids []int64, v url.Values) (tweet []Tweet, err error) {
+	var pids string
+	for w, i := range ids {
+		pids += strconv.FormatInt(i, 10)
+		if w != len(ids)-1 {
+			pids += ","
+		}
+	}
+	v = cleanValues(v)
+	v.Set("id", pids)
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/lookup.json", v, &tweet, _GET, response_ch}
+	return tweet, (<-response_ch).err
+}
+
+func (a TwitterApi) GetRetweets(id int64, v url.Values) (tweets []Tweet, err error) {
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/retweets/%d.json", id), v, &tweets, _GET, response_ch}
+	return tweets, (<-response_ch).err
+}
+
+//PostTweet will create a tweet with the specified status message
+func (a TwitterApi) PostTweet(status string, v url.Values) (tweet Tweet, err error) {
+	v = cleanValues(v)
+	v.Set("status", status)
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/statuses/update.json", v, &tweet, _POST, response_ch}
+	return tweet, (<-response_ch).err
+}
+
+//DeleteTweet will destroy (delete) the status (tweet) with the specified ID, assuming that the authenticated user is the author of the status (tweet).
+//If trimUser is set to true, only the user's Id will be provided in the user object returned.
+func (a TwitterApi) DeleteTweet(id int64, trimUser bool) (tweet Tweet, err error) {
+	v := url.Values{}
+	if trimUser {
+		v.Set("trim_user", "t")
+	}
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/destroy/%d.json", id), v, &tweet, _POST, response_ch}
+	return tweet, (<-response_ch).err
+}
+
+//Retweet will retweet the status (tweet) with the specified ID.
+//trimUser functions as in DeleteTweet
+func (a TwitterApi) Retweet(id int64, trimUser bool) (rt Tweet, err error) {
+	v := url.Values{}
+	if trimUser {
+		v.Set("trim_user", "t")
+	}
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/retweet/%d.json", id), v, &rt, _POST, response_ch}
+	return rt, (<-response_ch).err
+}
+
+//UnRetweet will renove retweet Untweets a retweeted status.
+//Returns the original Tweet with retweet details embedded.
+//
+//https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-statuses-unretweet-id
+//trim_user: tweet returned in a timeline will include a user object
+//including only the status authors numerical ID.
+func (a TwitterApi) UnRetweet(id int64, trimUser bool) (rt Tweet, err error) {
+	v := url.Values{}
+	if trimUser {
+		v.Set("trim_user", "t")
+	}
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/statuses/unretweet/%d.json", id), v, &rt, _POST, response_ch}
+	return rt, (<-response_ch).err
+}
+
+// Favorite will favorite the status (tweet) with the specified ID.
+// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-create
+func (a TwitterApi) Favorite(id int64) (rt Tweet, err error) {
+	v := url.Values{}
+	v.Set("id", fmt.Sprint(id))
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/favorites/create.json"), v, &rt, _POST, response_ch}
+	return rt, (<-response_ch).err
+}
+
+// Un-favorites the status specified in the ID parameter as the authenticating user.
+// Returns the un-favorited status in the requested format when successful.
+// https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/post-favorites-destroy
+func (a TwitterApi) Unfavorite(id int64) (rt Tweet, err error) {
+	v := url.Values{}
+	v.Set("id", fmt.Sprint(id))
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + fmt.Sprintf("/favorites/destroy.json"), v, &rt, _POST, response_ch}
+	return rt, (<-response_ch).err
+}

+ 370 - 0
vendor/github.com/ChimeraCoder/anaconda/twitter.go

@@ -0,0 +1,370 @@
+//Package anaconda provides structs and functions for accessing version 1.1
+//of the Twitter API.
+//
+//Successful API queries return native Go structs that can be used immediately,
+//with no need for type assertions.
+//
+//Authentication
+//
+//If you already have the access token (and secret) for your user (Twitter provides this for your own account on the developer portal), creating the client is simple:
+//
+//  anaconda.SetConsumerKey("your-consumer-key")
+//  anaconda.SetConsumerSecret("your-consumer-secret")
+//  api := anaconda.NewTwitterApi("your-access-token", "your-access-token-secret")
+//
+//
+//Queries
+//
+//Executing queries on an authenticated TwitterApi struct is simple.
+//
+//  searchResult, _ := api.GetSearch("golang", nil)
+//  for _ , tweet := range searchResult.Statuses {
+//      fmt.Print(tweet.Text)
+//  }
+//
+//Certain endpoints allow separate optional parameters; if desired, these can be passed as the final parameter.
+//
+//  v := url.Values{}
+//  v.Set("count", "30")
+//  result, err := api.GetSearch("golang", v)
+//
+//
+//Endpoints
+//
+//Anaconda implements most of the endpoints defined in the Twitter API documentation: https://dev.twitter.com/docs/api/1.1.
+//For clarity, in most cases, the function name is simply the name of the HTTP method and the endpoint (e.g., the endpoint `GET /friendships/incoming` is provided by the function `GetFriendshipsIncoming`).
+//
+//In a few cases, a shortened form has been chosen to make life easier (for example, retweeting is simply the function `Retweet`)
+//
+//More detailed information about the behavior of each particular endpoint can be found at the official Twitter API documentation.
+package anaconda
+
+import (
+	"compress/zlib"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/ChimeraCoder/tokenbucket"
+	"github.com/garyburd/go-oauth/oauth"
+)
+
// Internal HTTP-verb tags consumed by execQuery, followed by the public
// Twitter API base URLs.
const (
	_GET = iota
	_POST
	_DELETE
	_PUT

	BaseUrlV1     = "https://api.twitter.com/1"
	BaseUrl       = "https://api.twitter.com/1.1"
	UploadBaseUrl = "https://upload.twitter.com/1.1"
)
+
var (
	// oauthCredentials holds the application-wide consumer key/secret set
	// via SetConsumerKey/SetConsumerSecret; NewTwitterApi copies it into
	// each client's oauth.Client.
	oauthCredentials oauth.Credentials
)
+
// TwitterApi is an authenticated client for the Twitter REST API v1.1.
// Every endpoint method sends a query down queryQueue, which is consumed
// by the throttledQuery goroutine started in NewTwitterApi.
type TwitterApi struct {
	oauthClient          oauth.Client
	Credentials          *oauth.Credentials
	queryQueue           chan query
	bucket               *tokenbucket.Bucket // nil unless EnableThrottling was called
	returnRateLimitError bool
	HttpClient           *http.Client

	// Currently used only for the streaming API
	// and for checking rate-limiting headers
	// Default logger is silent
	Log Logger

	// used for testing
	// defaults to BaseUrl
	baseUrl string
}
+
// query is one unit of work for the throttledQuery goroutine: the request
// URL, form values, destination for the decoded JSON, HTTP verb tag
// (_GET/_POST/_DELETE/_PUT), and the channel the result is delivered on.
type query struct {
	url         string
	form        url.Values
	data        interface{} // decode target; written before response_ch receives
	method      int
	response_ch chan response
}
+
// response carries the outcome of a query back to the calling endpoint method.
type response struct {
	data interface{}
	err  error
}
+
// Defaults used when configuring throttling (see EnableThrottling).
const DEFAULT_DELAY = 0 * time.Second
const DEFAULT_CAPACITY = 5
+
//NewTwitterApi takes a user-specific access token and secret and returns a TwitterApi struct for that user.
//The TwitterApi struct can be used for accessing any of the endpoints available.
//The consumer key/secret must already have been set via SetConsumerKey/SetConsumerSecret.
//It also starts the background goroutine that services the query queue;
//call Close to stop it.
func NewTwitterApi(access_token string, access_token_secret string) *TwitterApi {
	//TODO figure out how much to buffer this channel
	//A non-buffered channel will cause blocking when multiple queries are made at the same time
	queue := make(chan query)
	c := &TwitterApi{
		oauthClient: oauth.Client{
			TemporaryCredentialRequestURI: "https://api.twitter.com/oauth/request_token",
			ResourceOwnerAuthorizationURI: "https://api.twitter.com/oauth/authenticate",
			TokenRequestURI:               "https://api.twitter.com/oauth/access_token",
			Credentials:                   oauthCredentials,
		},
		Credentials: &oauth.Credentials{
			Token:  access_token,
			Secret: access_token_secret,
		},
		queryQueue:           queue,
		bucket:               nil, // throttling disabled until EnableThrottling
		returnRateLimitError: false,
		HttpClient:           http.DefaultClient,
		Log:                  silentLogger{},
		baseUrl:              BaseUrl,
	}
	go c.throttledQuery()
	return c
}
+
//NewTwitterApiWithCredentials takes an app-specific consumer key and secret, along with a user-specific access token and secret and returns a TwitterApi struct for that user.
//Unlike NewTwitterApi, it does not rely on the package-level consumer
//credentials set by SetConsumerKey/SetConsumerSecret.
//The TwitterApi struct can be used for accessing any of the endpoints available.
func NewTwitterApiWithCredentials(access_token string, access_token_secret string, consumer_key string, consumer_secret string) *TwitterApi {
	api := NewTwitterApi(access_token, access_token_secret)
	api.oauthClient.Credentials.Token = consumer_key
	api.oauthClient.Credentials.Secret = consumer_secret
	return api
}
+
//SetConsumerKey will set the application-specific consumer_key used in the initial OAuth process
//This key is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show
//Call it before NewTwitterApi, which copies the package-level credentials.
func SetConsumerKey(consumer_key string) {
	oauthCredentials.Token = consumer_key
}
+
//SetConsumerSecret will set the application-specific secret used in the initial OAuth process
//This secret is listed on https://dev.twitter.com/apps/YOUR_APP_ID/show
//Call it before NewTwitterApi, which copies the package-level credentials.
func SetConsumerSecret(consumer_secret string) {
	oauthCredentials.Secret = consumer_secret
}
+
// ReturnRateLimitError specifies behavior when the Twitter API returns a rate-limit error.
// If set to true, the query will fail and return the error instead of automatically queuing and
// retrying the query when the rate limit expires (see throttledQuery).
func (c *TwitterApi) ReturnRateLimitError(b bool) {
	c.returnRateLimitError = b
}
+
// EnableThrottling enables query throttling using the tokenbucket algorithm:
// one token is added every `rate`, with at most `bufferSize` tokens banked.
func (c *TwitterApi) EnableThrottling(rate time.Duration, bufferSize int64) {
	c.bucket = tokenbucket.NewBucket(rate, bufferSize)
}
+
// DisableThrottling disables query throttling by discarding the token bucket.
func (c *TwitterApi) DisableThrottling() {
	c.bucket = nil
}
+
// SetDelay will set the delay between throttled queries.
// To turn off throttling, set it to 0 seconds.
// NOTE(review): c.bucket is nil until EnableThrottling is called, so calling
// SetDelay first will panic — confirm callers always enable throttling first.
func (c *TwitterApi) SetDelay(t time.Duration) {
	c.bucket.SetRate(t)
}
+
// GetDelay reports the current delay between throttled queries.
// NOTE(review): panics when throttling is disabled (c.bucket is nil).
func (c *TwitterApi) GetDelay() time.Duration {
	return c.bucket.GetRate()
}
+
// SetBaseUrl overrides the API host (default BaseUrl), primarily for tests.
// SetBaseUrl is experimental and may be removed in future releases.
func (c *TwitterApi) SetBaseUrl(baseUrl string) {
	c.baseUrl = baseUrl
}
+
+//AuthorizationURL generates the authorization URL for the first part of the OAuth handshake.
+//Redirect the user to this URL.
+//This assumes that the consumer key has already been set (using SetConsumerKey or NewTwitterApiWithCredentials).
+func (c *TwitterApi) AuthorizationURL(callback string) (string, *oauth.Credentials, error) {
+	tempCred, err := c.oauthClient.RequestTemporaryCredentials(http.DefaultClient, callback, nil)
+	if err != nil {
+		return "", nil, err
+	}
+	return c.oauthClient.AuthorizationURL(tempCred, nil), tempCred, nil
+}
+
+// GetCredentials gets the access token using the verifier received with the callback URL and the
+// credentials in the first part of the handshake. GetCredentials implements the third part of the OAuth handshake.
+// The returned url.Values holds the access_token, the access_token_secret, the user_id and the screen_name.
+func (c *TwitterApi) GetCredentials(tempCred *oauth.Credentials, verifier string) (*oauth.Credentials, url.Values, error) {
+	return c.oauthClient.RequestToken(http.DefaultClient, tempCred, verifier)
+}
+
// defaultValues guarantees a usable url.Values and forces extended tweet
// mode so the API returns full, untruncated tweet text.
func defaultValues(v url.Values) url.Values {
	out := v
	if out == nil {
		out = make(url.Values)
	}
	out.Set("tweet_mode", "extended")
	return out
}
+
// cleanValues returns v as-is when non-nil, otherwise a fresh empty url.Values,
// so callers can Set parameters without a nil check.
func cleanValues(v url.Values) url.Values {
	if v != nil {
		return v
	}
	return make(url.Values)
}
+
// apiGet issues a GET request to the Twitter API and decodes the response JSON to data.
// It forces tweet_mode=extended via defaultValues before sending.
func (c TwitterApi) apiGet(urlStr string, form url.Values, data interface{}) error {
	form = defaultValues(form)
	resp, err := c.oauthClient.Get(c.HttpClient, c.Credentials, urlStr, form)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return decodeResponse(resp, data)
}
+
// apiPost issues a POST request to the Twitter API and decodes the response JSON to data.
// Unlike apiGet, it sends form unmodified (no tweet_mode default).
func (c TwitterApi) apiPost(urlStr string, form url.Values, data interface{}) error {
	resp, err := c.oauthClient.Post(c.HttpClient, c.Credentials, urlStr, form)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return decodeResponse(resp, data)
}
+
// apiDel issues a DELETE request to the Twitter API and decodes the response JSON to data.
func (c TwitterApi) apiDel(urlStr string, form url.Values, data interface{}) error {
	resp, err := c.oauthClient.Delete(c.HttpClient, c.Credentials, urlStr, form)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return decodeResponse(resp, data)
}
+
// apiPut issues a PUT request to the Twitter API and decodes the response JSON to data.
func (c TwitterApi) apiPut(urlStr string, form url.Values, data interface{}) error {
	resp, err := c.oauthClient.Put(c.HttpClient, c.Credentials, urlStr, form)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return decodeResponse(resp, data)
}
+
// decodeResponse decodes the JSON response from the Twitter API into data.
// Non-2xx responses are converted into an *ApiError via newApiError.
func decodeResponse(resp *http.Response, data interface{}) error {
	// Prevent memory leak in the case where the Response.Body is not used.
	// As per the net/http package, Response.Body still needs to be closed.
	// NOTE(review): the api* callers also defer resp.Body.Close(), so the
	// body is closed twice — presumably harmless, but worth confirming.
	defer resp.Body.Close()

	// Twitter returns deflate data despite the client only requesting gzip
	// data.  net/http automatically handles the latter but not the former:
	// https://github.com/golang/go/issues/18779
	if resp.Header.Get("Content-Encoding") == "deflate" {
		var err error
		// NOTE(review): the zlib reader wrapping the original body is never
		// itself closed; the deferred Close above closes the wrapper only.
		resp.Body, err = zlib.NewReader(resp.Body)
		if err != nil {
			return err
		}
	}

	// according to dev.twitter.com, chunked upload append returns HTTP 2XX
	// so we need a special case when decoding the response
	if strings.HasSuffix(resp.Request.URL.String(), "upload.json") {
		if resp.StatusCode == 204 {
			// empty response, don't decode
			return nil
		}
		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			return newApiError(resp)
		}
	} else if resp.StatusCode != 200 {
		// Non-upload endpoints are treated strictly: only 200 is success.
		return newApiError(resp)
	}
	return json.NewDecoder(resp.Body).Decode(data)
}
+
// NewApiError reads the (error) response body and packages the status code,
// headers, body text and request URL into an *ApiError.
// The read error is deliberately ignored: a partial/empty body still yields
// a usable error value. The caller remains responsible for closing resp.Body.
func NewApiError(resp *http.Response) *ApiError {
	body, _ := ioutil.ReadAll(resp.Body)

	return &ApiError{
		StatusCode: resp.StatusCode,
		Header:     resp.Header,
		Body:       string(body),
		URL:        resp.Request.URL,
	}
}
+
+//query executes a query to the specified url, sending the values specified by form, and decodes the response JSON to data
+//method can be either _GET or _POST
+func (c TwitterApi) execQuery(urlStr string, form url.Values, data interface{}, method int) error {
+	switch method {
+	case _GET:
+		return c.apiGet(urlStr, form, data)
+	case _POST:
+		return c.apiPost(urlStr, form, data)
+	case _DELETE:
+		return c.apiPost(urlStr, form, data)
+	case _PUT:
+		return c.apiPost(urlStr, form, data)
+	default:
+		return fmt.Errorf("HTTP method not yet supported")
+	}
+}
+
// throttledQuery executes queries from the queue, pacing them with the token
// bucket configured via EnableThrottling (if any).
// It is the only function that reads from the queryQueue for a particular
// *TwitterApi struct, and runs until Close closes the queue.
func (c *TwitterApi) throttledQuery() {
	for q := range c.queryQueue {
		url := q.url
		form := q.form
		data := q.data //This is where the actual response will be written
		method := q.method

		response_ch := q.response_ch

		// Block until a token is available (throttling enabled only).
		if c.bucket != nil {
			<-c.bucket.SpendToken(1)
		}

		err := c.execQuery(url, form, data, method)

		// Check if Twitter returned a rate-limiting error
		if err != nil {
			if apiErr, ok := err.(*ApiError); ok {
				if isRateLimitError, nextWindow := apiErr.RateLimitCheck(); isRateLimitError && !c.returnRateLimitError {
					c.Log.Info(apiErr.Error())

					// If this is a rate-limiting error, re-add the job to the queue
					// TODO it really should preserve order
					// (the send must happen in a goroutine: this loop is the
					// queue's only reader, so sending inline would deadlock)
					go func(q query) {
						c.queryQueue <- q
					}(q)

					// Sleep until the rate-limit window resets.
					delay := nextWindow.Sub(time.Now())
					<-time.After(delay)

					// Drain the bucket (start over fresh)
					if c.bucket != nil {
						c.bucket.Drain()
					}

					// The caller is NOT notified here; it stays blocked on
					// response_ch until the requeued copy completes.
					continue
				}
			}
		}

		response_ch <- response{data, err}
	}
}
+
// Close closes the query queue, terminating the throttledQuery goroutine.
// The client must not be used for further queries after Close.
func (c *TwitterApi) Close() {
	close(c.queryQueue)
}

+ 74 - 0
vendor/github.com/ChimeraCoder/anaconda/twitter_entities.go

@@ -0,0 +1,74 @@
+package anaconda
+
// UrlEntity wraps the list of URL entities embedded in a tweet or profile.
// Field names mirror the JSON keys of the v1.1 entities object.
type UrlEntity struct {
	Urls []struct {
		Indices      []int  `json:"indices"`
		Url          string `json:"url"`
		Display_url  string `json:"display_url"`
		Expanded_url string `json:"expanded_url"`
	} `json:"urls"`
}
+
// Entities represents the "entities" object of a tweet or user profile:
// URLs, hashtags, user mentions and attached media.
type Entities struct {
	Urls []struct {
		Indices      []int  `json:"indices"`
		Url          string `json:"url"`
		Display_url  string `json:"display_url"`
		Expanded_url string `json:"expanded_url"`
	} `json:"urls"`
	Hashtags []struct {
		Indices []int  `json:"indices"`
		Text    string `json:"text"`
	} `json:"hashtags"`
	Url           UrlEntity `json:"url"`
	User_mentions []struct {
		Name        string `json:"name"`
		Indices     []int  `json:"indices"`
		Screen_name string `json:"screen_name"`
		Id          int64  `json:"id"`
		Id_str      string `json:"id_str"`
	} `json:"user_mentions"`
	Media []EntityMedia `json:"media"`
}
+
// EntityMedia describes one media attachment (photo, video, animated GIF)
// referenced from a tweet's entities.
type EntityMedia struct {
	Id                   int64      `json:"id"`
	Id_str               string     `json:"id_str"`
	Media_url            string     `json:"media_url"`
	Media_url_https      string     `json:"media_url_https"`
	Url                  string     `json:"url"`
	Display_url          string     `json:"display_url"`
	Expanded_url         string     `json:"expanded_url"`
	Sizes                MediaSizes `json:"sizes"`
	Source_status_id     int64      `json:"source_status_id"`
	Source_status_id_str string     `json:"source_status_id_str"`
	Type                 string     `json:"type"`
	Indices              []int      `json:"indices"`
	VideoInfo            VideoInfo  `json:"video_info"`
	ExtAltText           string     `json:"ext_alt_text"`
}
+
// MediaSizes lists the four standard renditions Twitter provides for a media item.
type MediaSizes struct {
	Medium MediaSize `json:"medium"`
	Thumb  MediaSize `json:"thumb"`
	Small  MediaSize `json:"small"`
	Large  MediaSize `json:"large"`
}
+
// MediaSize is one rendition's pixel dimensions and resize strategy.
type MediaSize struct {
	W      int    `json:"w"`
	H      int    `json:"h"`
	Resize string `json:"resize"`
}
+
// VideoInfo holds playback metadata for video/animated-GIF media.
type VideoInfo struct {
	AspectRatio    []int     `json:"aspect_ratio"`
	DurationMillis int64     `json:"duration_millis"`
	Variants       []Variant `json:"variants"`
}
+
// Variant is one encoding of a video (bitrate, MIME type and URL).
type Variant struct {
	Bitrate     int    `json:"bitrate"`
	ContentType string `json:"content_type"`
	Url         string `json:"url"`
}

+ 53 - 0
vendor/github.com/ChimeraCoder/anaconda/twitter_user.go

@@ -0,0 +1,53 @@
+package anaconda
+
// User represents a Twitter account as returned by the v1.1 users endpoints.
// Field names mirror the JSON keys of the user object.
type User struct {
	ContributorsEnabled            bool     `json:"contributors_enabled"`
	CreatedAt                      string   `json:"created_at"`
	DefaultProfile                 bool     `json:"default_profile"`
	DefaultProfileImage            bool     `json:"default_profile_image"`
	Description                    string   `json:"description"`
	Email                          string   `json:"email"`
	Entities                       Entities `json:"entities"`
	FavouritesCount                int      `json:"favourites_count"`
	FollowRequestSent              bool     `json:"follow_request_sent"`
	FollowersCount                 int      `json:"followers_count"`
	Following                      bool     `json:"following"`
	FriendsCount                   int      `json:"friends_count"`
	GeoEnabled                     bool     `json:"geo_enabled"`
	HasExtendedProfile             bool     `json:"has_extended_profile"`
	Id                             int64    `json:"id"`
	IdStr                          string   `json:"id_str"`
	IsTranslator                   bool     `json:"is_translator"`
	IsTranslationEnabled           bool     `json:"is_translation_enabled"`
	Lang                           string   `json:"lang"` // BCP-47 code of user defined language
	ListedCount                    int64    `json:"listed_count"`
	Location                       string   `json:"location"` // User defined location
	Name                           string   `json:"name"`
	Notifications                  bool     `json:"notifications"`
	ProfileBackgroundColor         string   `json:"profile_background_color"`
	ProfileBackgroundImageURL      string   `json:"profile_background_image_url"`
	ProfileBackgroundImageUrlHttps string   `json:"profile_background_image_url_https"`
	ProfileBackgroundTile          bool     `json:"profile_background_tile"`
	ProfileBannerURL               string   `json:"profile_banner_url"`
	ProfileImageURL                string   `json:"profile_image_url"`
	ProfileImageUrlHttps           string   `json:"profile_image_url_https"`
	ProfileLinkColor               string   `json:"profile_link_color"`
	ProfileSidebarBorderColor      string   `json:"profile_sidebar_border_color"`
	ProfileSidebarFillColor        string   `json:"profile_sidebar_fill_color"`
	ProfileTextColor               string   `json:"profile_text_color"`
	ProfileUseBackgroundImage      bool     `json:"profile_use_background_image"`
	Protected                      bool     `json:"protected"`
	ScreenName                     string   `json:"screen_name"`
	ShowAllInlineMedia             bool     `json:"show_all_inline_media"`
	Status                         *Tweet   `json:"status"` // Only included if the user is a friend
	StatusesCount                  int64    `json:"statuses_count"`
	TimeZone                       string   `json:"time_zone"`
	URL                            string   `json:"url"`
	UtcOffset                      int      `json:"utc_offset"`
	Verified                       bool     `json:"verified"`
	WithheldInCountries            []string `json:"withheld_in_countries"`
	WithheldScope                  string   `json:"withheld_scope"`
}
+
+// Provide language translator from BCP-47 to human readable format for Lang field?
+// Available through golang.org/x/text/language, deserves further investigation

+ 89 - 0
vendor/github.com/ChimeraCoder/anaconda/users.go

@@ -0,0 +1,89 @@
+package anaconda
+
import (
	"net/url"
	"strconv"
	"strings"
)
+
// GetUsersLookup looks up users by a comma-separated list of screen names
// (usernames), setting the screen_name parameter on the query.
func (a TwitterApi) GetUsersLookup(usernames string, v url.Values) (u []User, err error) {
	v = cleanValues(v)
	v.Set("screen_name", usernames)
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/lookup.json", v, &u, _GET, response_ch}
	return u, (<-response_ch).err
}
+
+func (a TwitterApi) GetUsersLookupByIds(ids []int64, v url.Values) (u []User, err error) {
+	var pids string
+	for w, i := range ids {
+		//pids += strconv.Itoa(i)
+		pids += strconv.FormatInt(i, 10)
+		if w != len(ids)-1 {
+			pids += ","
+		}
+	}
+	v = cleanValues(v)
+	v.Set("user_id", pids)
+	response_ch := make(chan response)
+	a.queryQueue <- query{a.baseUrl + "/users/lookup.json", v, &u, _GET, response_ch}
+	return u, (<-response_ch).err
+}
+
// GetUsersShow fetches a single user by screen name via /users/show.
func (a TwitterApi) GetUsersShow(username string, v url.Values) (u User, err error) {
	v = cleanValues(v)
	v.Set("screen_name", username)
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/show.json", v, &u, _GET, response_ch}
	return u, (<-response_ch).err
}
+
// GetUsersShowById fetches a single user by numeric ID via /users/show.
func (a TwitterApi) GetUsersShowById(id int64, v url.Values) (u User, err error) {
	v = cleanValues(v)
	v.Set("user_id", strconv.FormatInt(id, 10))
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/show.json", v, &u, _GET, response_ch}
	return u, (<-response_ch).err
}
+
// GetUserSearch runs a simple relevance-based user search for searchTerm.
func (a TwitterApi) GetUserSearch(searchTerm string, v url.Values) (u []User, err error) {
	v = cleanValues(v)
	v.Set("q", searchTerm)
	// Set other values before calling this method:
	// page, count, include_entities
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/search.json", v, &u, _GET, response_ch}
	return u, (<-response_ch).err
}
+
// GetUsersSuggestions fetches Twitter's suggested users list.
func (a TwitterApi) GetUsersSuggestions(v url.Values) (u []User, err error) {
	v = cleanValues(v)
	// Set other values before calling this method:
	// page, count, include_entities
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/suggestions.json", v, &u, _GET, response_ch}
	return u, (<-response_ch).err
}
+
// PostUsersReportSpam : Reports and Blocks a User by screen_name
// Reference : https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-users-report_spam
// If you don't want to block the user you should add
// v.Set("perform_block", "false")
func (a TwitterApi) PostUsersReportSpam(username string, v url.Values) (u User, err error) {
	v = cleanValues(v)
	v.Set("screen_name", username)
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/report_spam.json", v, &u, _POST, response_ch}
	return u, (<-response_ch).err
}
+
// PostUsersReportSpamById : Reports and Blocks a User by user_id
// Reference : https://developer.twitter.com/en/docs/accounts-and-users/mute-block-report-users/api-reference/post-users-report_spam
// If you don't want to block the user you should add
// v.Set("perform_block", "false")
func (a TwitterApi) PostUsersReportSpamById(id int64, v url.Values) (u User, err error) {
	v = cleanValues(v)
	v.Set("user_id", strconv.FormatInt(id, 10))
	response_ch := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/users/report_spam.json", v, &u, _POST, response_ch}
	return u, (<-response_ch).err
}

+ 78 - 0
vendor/github.com/ChimeraCoder/anaconda/webhook.go

@@ -0,0 +1,78 @@
+package anaconda
+
+import (
+	"net/url"
+)
+
//GetActivityWebhooks queries the twitter account_activity webhook configuration.
//Returns all URLs and their statuses for the given app. Currently,
//only one webhook URL can be registered to an application.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/get-webhook-config
func (a TwitterApi) GetActivityWebhooks(v url.Values) (u []WebHookResp, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks.json", v, &u, _GET, responseCh}
	return u, (<-responseCh).err
}
+
//WebHookResp represents one webhook entry in the Get webhook responses.
type WebHookResp struct {
	ID        string
	URL       string
	Valid     bool
	CreatedAt string
}
+
//SetActivityWebhooks registers a new account_activity webhook URL for the
//given application context.
//The URL will be validated via CRC request before saving. In case the validation fails,
//a comprehensive error message is returned to the requester.
//Only one webhook URL can be registered to an application.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/new-webhook-config
func (a TwitterApi) SetActivityWebhooks(v url.Values) (u []WebHookResp, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks.json", v, &u, _POST, responseCh}
	return u, (<-responseCh).err
}
+
//DeleteActivityWebhooks removes the webhook from the provided application’s configuration.
//The query is tagged _DELETE; the actual HTTP verb used depends on execQuery's routing.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/delete-webhook-config
func (a TwitterApi) DeleteActivityWebhooks(v url.Values, webhookID string) (u interface{}, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + ".json", v, &u, _DELETE, responseCh}
	return u, (<-responseCh).err
}
+
//PutActivityWebhooks triggers a CRC re-check, re-enabling the webhook by
//setting its status to valid.
//The query is tagged _PUT; the actual HTTP verb used depends on execQuery's routing.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/validate-webhook-config
func (a TwitterApi) PutActivityWebhooks(v url.Values, webhookID string) (u interface{}, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + ".json", v, &u, _PUT, responseCh}
	return u, (<-responseCh).err
}
+
//SetWHSubscription subscribes the provided app to events for the provided user context.
//When subscribed, all DM events for the provided user will be sent to the app’s webhook via POST request.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/new-subscription
func (a TwitterApi) SetWHSubscription(v url.Values, webhookID string) (u interface{}, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _POST, responseCh}
	return u, (<-responseCh).err
}
+
//GetWHSubscription provides a way to determine if a webhook configuration is
//subscribed to the provided user’s Direct Messages.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/get-subscription
func (a TwitterApi) GetWHSubscription(v url.Values, webhookID string) (u interface{}, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _GET, responseCh}
	return u, (<-responseCh).err
}
+
//DeleteWHSubscription deactivates the subscription for the provided user context and app. After deactivation,
//all DM events for the requesting user will no longer be sent to the webhook URL.
//The query is tagged _DELETE; the actual HTTP verb used depends on execQuery's routing.
//https://developer.twitter.com/en/docs/accounts-and-users/subscribe-account-activity/api-reference/delete-subscription
func (a TwitterApi) DeleteWHSubscription(v url.Values, webhookID string) (u interface{}, err error) {
	responseCh := make(chan response)
	a.queryQueue <- query{a.baseUrl + "/account_activity/webhooks/" + webhookID + "/subscriptions.json", v, &u, _DELETE, responseCh}
	return u, (<-responseCh).err
}

+ 4 - 0
vendor/github.com/ChimeraCoder/tokenbucket/.gitignore

@@ -0,0 +1,4 @@
+*.swp
+*.swo
+*.swn
+conf.sh

+ 165 - 0
vendor/github.com/ChimeraCoder/tokenbucket/COPYING

@@ -0,0 +1,165 @@
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.

+ 1 - 0
vendor/github.com/ChimeraCoder/tokenbucket/LICENSE

@@ -0,0 +1 @@
+COPYING

+ 48 - 0
vendor/github.com/ChimeraCoder/tokenbucket/README

@@ -0,0 +1,48 @@
+[![GoDoc](http://godoc.org/github.com/ChimeraCoder/tokenbucket?status.png)](http://godoc.org/github.com/ChimeraCoder/tokenbucket)
+
+tokenbucket
+====================
+
+This package provides an implementation of [Token bucket](https://en.wikipedia.org/wiki/Token_bucket) scheduling in Go. It is useful for implementing rate-limiting, traffic shaping, or other sorts of scheduling that depend on bandwidth constraints.
+
+
+Example
+------------
+
+
+To create a new bucket, specify a capacity (how many tokens can be stored "in the bank"), and a rate (how often a new token is added).
+
+````go
+
+    // Create a new bucket
+	// Allow a new action every 5 seconds, with a maximum of 3 "in the bank"
+	bucket := tokenbucket.NewBucket(3, 5 * time.Second)
+````
+
+This bucket should be shared between any functions that share the same constraints. (These functions may or may not run in separate goroutines).
+
+
+Anytime a regulated action is performed, spend a token.
+
+````go
+	// To perform a regulated action, we must spend a token
+	// RegulatedAction will not be performed until the bucket contains enough tokens
+	<-bucket.SpendToken(1)
+	RegulatedAction()
+````
+
+`SpendToken` returns immediately. Reading from the channel that it returns will block until the action has "permission" to continue (ie, until there are enough tokens in the bucket).
+
+
+(The channel that `SpendToken` returns is of type `error`. For now, the value will always be `nil`, so it can be ignored.)
+
+
+
+#### License
+
+`tokenbucket` is free software provided under version 3 of the LGPL license.
+
+
+Software that uses `tokenbucket` may be released under *any* license, as long as the source code for `tokenbucket` (including any modifications) is made available under the LGPLv3 license.
+
+You do not need to release the rest of the software under the LGPL, or any free/open-source license, for that matter (though we would encourage you to do so!).

+ 1 - 0
vendor/github.com/ChimeraCoder/tokenbucket/README.md

@@ -0,0 +1 @@
+README

+ 86 - 0
vendor/github.com/ChimeraCoder/tokenbucket/tokenbucket.go

@@ -0,0 +1,86 @@
+package tokenbucket
+
+import (
+	"sync"
+	"time"
+)
+
+type Bucket struct {
+	capacity  int64
+	tokens    chan struct{}
+	rate      time.Duration // Add a token to the bucket every 1/r units of time
+	rateMutex sync.Mutex
+}
+
+func NewBucket(rate time.Duration, capacity int64) *Bucket {
+
+	//A bucket is simply a channel with a buffer representing the maximum size
+	tokens := make(chan struct{}, capacity)
+
+	b := &Bucket{capacity, tokens, rate, sync.Mutex{}}
+
+	//Set off a function that will continuously add tokens to the bucket
+	go func(b *Bucket) {
+		ticker := time.NewTicker(rate)
+		for _ = range ticker.C {
+			b.tokens <- struct{}{}
+		}
+	}(b)
+
+	return b
+}
+
+func (b *Bucket) GetRate() time.Duration {
+	b.rateMutex.Lock()
+	tmp := b.rate
+	b.rateMutex.Unlock()
+	return tmp
+}
+
+func (b *Bucket) SetRate(rate time.Duration) {
+	b.rateMutex.Lock()
+	b.rate = rate
+	b.rateMutex.Unlock()
+}
+
+//AddToken manually adds n tokens to the bucket. NOTE: currently unimplemented (no-op).
+func (b *Bucket) AddToken(n int64) {
+}
+
+func (b *Bucket) withdrawTokens(n int64) error {
+	for i := int64(0); i < n; i++ {
+		<-b.tokens
+	}
+	return nil
+}
+
+func (b *Bucket) SpendToken(n int64) <-chan error {
+	// Default to spending a single token
+	if n < 0 {
+		n = 1
+	}
+
+	c := make(chan error)
+	go func(b *Bucket, n int64, c chan error) {
+		c <- b.withdrawTokens(n)
+		close(c)
+		return
+	}(b, n, c)
+
+	return c
+}
+
+// Drain will empty all tokens in the bucket
+// If the tokens are being added too quickly (if the rate is too fast)
+// this will never drain
+func (b *Bucket) Drain() error{
+    // TODO replace this with a more solid approach (such as replacing the channel altogether)
+    for {
+        select {
+            case _ = <-b.tokens:
+                continue
+            default:
+                return nil
+        }
+    }
+}

+ 22 - 0
vendor/github.com/azr/backoff/.gitignore

@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe

+ 2 - 0
vendor/github.com/azr/backoff/.travis.yml

@@ -0,0 +1,2 @@
+language: go
+go: 1.3.3

+ 20 - 0
vendor/github.com/azr/backoff/LICENSE

@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 22 - 0
vendor/github.com/azr/backoff/README.md

@@ -0,0 +1,22 @@
+# backoff
+
+[![GoDoc](https://godoc.org/github.com/azr/backoff?status.png)](https://godoc.org/github.com/azr/backoff)
+[![Build Status](https://travis-ci.org/azr/backoff.png)](https://travis-ci.org/azr/backoff)
+
+This is a fork from the awesome [cenkalti/backoff](https://github.com/cenkalti/backoff) which is a go port from
+[google-http-java-client](https://code.google.com/p/google-http-java-client/wiki/ExponentialBackoff).
+
+This BackOff sleeps upon BackOff() and calculates its next backoff time instead of returning the duration to sleep.
+
+[Exponential backoff](http://en.wikipedia.org/wiki/Exponential_backoff)
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries exponentially increase and stop increasing when a certain threshold is met.
+
+
+
+## Install
+
+```bash
+go get github.com/azr/backoff
+```

+ 51 - 0
vendor/github.com/azr/backoff/backoff.go

@@ -0,0 +1,51 @@
+//Package backoff helps you at backing off !
+//
+//It was forked from github.com/cenkalti/backoff which is awesome.
+//
+//This BackOff sleeps upon BackOff() and calculates its next backoff time instead of returning the duration to sleep.
+package backoff
+
+import "time"
+
+// Interface interface to use after a retryable operation failed.
+// A Interface.BackOff sleeps.
+type Interface interface {
+	// Example usage:
+	//
+	//   for ;; {
+	//       err, canRetry := somethingThatCanFail()
+	//       if err != nil && canRetry {
+	//           backoffer.Backoff()
+	//       }
+	//   }
+	BackOff()
+
+	// Reset to initial state.
+	Reset()
+}
+
+// ZeroBackOff is a fixed back-off policy whose back-off time is always zero,
+// meaning that the operation is retried immediately without waiting.
+type ZeroBackOff struct{}
+
+var _ Interface = (*ZeroBackOff)(nil)
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) BackOff() {}
+
+type ConstantBackOff struct {
+	Interval time.Duration
+}
+
+var _ Interface = (*ConstantBackOff)(nil)
+
+func (b *ConstantBackOff) Reset() {}
+
+func (b *ConstantBackOff) BackOff() {
+	time.Sleep(b.Interval)
+}
+
+func NewConstant(d time.Duration) *ConstantBackOff {
+	return &ConstantBackOff{Interval: d}
+}

+ 112 - 0
vendor/github.com/azr/backoff/exponential.go

@@ -0,0 +1,112 @@
+package backoff
+
+import (
+	"math/rand"
+	"time"
+)
+
+/*
+ExponentialBackOff is an implementation of BackOff that increases
+it's back off period for each retry attempt using a randomization function
+that grows exponentially.
+Backoff() time is calculated using the following formula:
+    randomized_interval =
+        retry_interval * (random value in range [1 - randomization_factor, 1 + randomization_factor])
+In other words BackOff() will sleep for times between the randomization factor
+percentage below and above the retry interval.
+For example, using 2 seconds as the base retry interval and 0.5 as the
+randomization factor, the actual back off period used in the next retry
+attempt will be between 1 and 3 seconds.
+
+Note: max_interval caps the retry_interval and not the randomized_interval.
+
+Example: The default retry_interval is .5 seconds, default randomization_factor
+is 0.5, default multiplier is 1.5 and the max_interval is set to 25 seconds.
+For 12 tries the sequence will sleep (values in seconds) (output from ExampleExpBackOffTimes) :
+
+    request#     retry_interval     randomized_interval
+
+    1             0.5                [0.25,   0.75]
+    2             0.75               [0.375,  1.125]
+    3             1.125              [0.562,  1.687]
+    4             1.687              [0.8435, 2.53]
+    5             2.53               [1.265,  3.795]
+    6             3.795              [1.897,  5.692]
+    7             5.692              [2.846,  8.538]
+    8             8.538              [4.269, 12.807]
+    9            12.807              [6.403, 19.210]
+    10           19.22               [9.611, 28.833]
+    11           25                  [12.5,  37.5]
+    12           25                  [12.5,  37.5]
+Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+	InitialInterval time.Duration
+	currentInterval time.Duration
+	MaxInterval     time.Duration
+
+	RandomizationFactor float64
+	Multiplier          float64
+}
+
+// Default values for ExponentialBackOff.
+const (
+	DefaultInitialInterval     = 500 * time.Millisecond
+	DefaultRandomizationFactor = 0.5
+	DefaultMultiplier          = 1.5
+	DefaultMaxInterval         = 60 * time.Second
+)
+
+// NewExponential creates an instance of ExponentialBackOff using default values.
+func NewExponential() *ExponentialBackOff {
+	b := &ExponentialBackOff{
+		InitialInterval:     DefaultInitialInterval,
+		RandomizationFactor: DefaultRandomizationFactor,
+		Multiplier:          DefaultMultiplier,
+		MaxInterval:         DefaultMaxInterval,
+		currentInterval:     DefaultInitialInterval,
+	}
+	b.Reset()
+	return b
+}
+
+// Reset the interval back to the initial retry interval and restarts the timer.
+func (b *ExponentialBackOff) Reset() {
+	b.currentInterval = b.InitialInterval
+}
+
+func (b *ExponentialBackOff) GetSleepTime() time.Duration {
+	return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+}
+
+func (b *ExponentialBackOff) BackOff() {
+	time.Sleep(b.GetSleepTime())
+
+	b.IncrementCurrentInterval()
+}
+
+// Increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) IncrementCurrentInterval() {
+	// Check for overflow, if overflow is detected set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+func (b *ExponentialBackOff) Inverval() time.Duration {
+	return b.currentInterval
+}
+
+// Returns a random value from the interval:
+//  [(1 - randomizationFactor) * currentInterval, (1 + randomizationFactor) * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}

+ 44 - 0
vendor/github.com/azr/backoff/linear.go

@@ -0,0 +1,44 @@
+package backoff
+
+// LinearBackOff is a back-off policy whose back-off time is multiplied by mult and incremented by incr
+// each time it is called.
+// mult can be one ;).
+import "time"
+
+// LinearBackOff grows (by Increment and Multiplier) until MaxInterval is reached.
+type LinearBackOff struct {
+	InitialInterval time.Duration
+	Multiplier      float64
+	Increment       time.Duration
+	MaxInterval     time.Duration
+	currentInterval time.Duration
+}
+
+var _ Interface = (*LinearBackOff)(nil)
+
+func NewLinear(from, to, incr time.Duration, mult float64) *LinearBackOff {
+	return &LinearBackOff{
+		InitialInterval: from,
+		MaxInterval:     to,
+		currentInterval: from,
+		Increment:       incr,
+		Multiplier:      mult,
+	}
+}
+
+func (lb *LinearBackOff) Reset() {
+	lb.currentInterval = lb.InitialInterval
+}
+
+func (lb *LinearBackOff) increment() {
+	lb.currentInterval = time.Duration(float64(lb.currentInterval) * lb.Multiplier)
+	lb.currentInterval += lb.Increment
+	if lb.currentInterval > lb.MaxInterval {
+		lb.currentInterval = lb.MaxInterval
+	}
+}
+
+func (lb *LinearBackOff) BackOff() {
+	time.Sleep(lb.currentInterval)
+	lb.increment()
+}

+ 2 - 0
vendor/github.com/dustin/go-jsonpointer/.gitignore

@@ -0,0 +1,2 @@
+#*
+*~

+ 19 - 0
vendor/github.com/dustin/go-jsonpointer/LICENSE

@@ -0,0 +1,19 @@
+Copyright (c) 2013 Dustin Sallings
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 5 - 0
vendor/github.com/dustin/go-jsonpointer/README.markdown

@@ -0,0 +1,5 @@
+# JSON Pointer for go
+
+This is an implementation of [JSON Pointer](http://tools.ietf.org/html/rfc6901).
+
+[![Coverage Status](https://coveralls.io/repos/dustin/go-jsonpointer/badge.png?branch=master)](https://coveralls.io/r/dustin/go-jsonpointer?branch=master)

+ 328 - 0
vendor/github.com/dustin/go-jsonpointer/bytes.go

@@ -0,0 +1,328 @@
+package jsonpointer
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/dustin/gojson"
+)
+
+func arreq(a, b []string) bool {
+	if len(a) == len(b) {
+		for i := range a {
+			if a[i] != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
+
+func unescape(s string) string {
+	n := strings.Count(s, "~")
+	if n == 0 {
+		return s
+	}
+
+	t := make([]byte, len(s)-n+1) // remove one char per ~
+	w := 0
+	start := 0
+	for i := 0; i < n; i++ {
+		j := start + strings.Index(s[start:], "~")
+		w += copy(t[w:], s[start:j])
+		if len(s) < j+2 {
+			t[w] = '~'
+			w++
+			break
+		}
+		c := s[j+1]
+		switch c {
+		case '0':
+			t[w] = '~'
+			w++
+		case '1':
+			t[w] = '/'
+			w++
+		default:
+			t[w] = '~'
+			w++
+			t[w] = c
+			w++
+		}
+		start = j + 2
+	}
+	w += copy(t[w:], s[start:])
+	return string(t[0:w])
+}
+
+func parsePointer(s string) []string {
+	a := strings.Split(s[1:], "/")
+	if !strings.Contains(s, "~") {
+		return a
+	}
+
+	for i := range a {
+		if strings.Contains(a[i], "~") {
+			a[i] = unescape(a[i])
+		}
+	}
+	return a
+}
+
+func escape(s string, out []rune) []rune {
+	for _, c := range s {
+		switch c {
+		case '/':
+			out = append(out, '~', '1')
+		case '~':
+			out = append(out, '~', '0')
+		default:
+			out = append(out, c)
+		}
+	}
+	return out
+}
+
+func encodePointer(p []string) string {
+	out := make([]rune, 0, 64)
+
+	for _, s := range p {
+		out = append(out, '/')
+		out = escape(s, out)
+	}
+	return string(out)
+}
+
+func grokLiteral(b []byte) string {
+	s, ok := json.UnquoteBytes(b)
+	if !ok {
+		panic("could not grok literal " + string(b))
+	}
+	return string(s)
+}
+
+func isSpace(c rune) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// FindDecode finds an object by JSONPointer path and then decode the
+// result into a user-specified object.  Errors if a properly
+// formatted JSON document can't be found at the given path.
+func FindDecode(data []byte, path string, into interface{}) error {
+	d, err := Find(data, path)
+	if err != nil {
+		return err
+	}
+	return json.Unmarshal(d, into)
+}
+
+// Find a section of raw JSON by specifying a JSONPointer.
+func Find(data []byte, path string) ([]byte, error) {
+	if path == "" {
+		return data, nil
+	}
+
+	needle := parsePointer(path)
+
+	scan := &json.Scanner{}
+	scan.Reset()
+
+	offset := 0
+	beganLiteral := 0
+	current := make([]string, 0, 32)
+	for {
+		if offset >= len(data) {
+			break
+		}
+		newOp := scan.Step(scan, int(data[offset]))
+		offset++
+
+		switch newOp {
+		case json.ScanBeginArray:
+			current = append(current, "0")
+		case json.ScanObjectKey:
+			current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1])
+		case json.ScanBeginLiteral:
+			beganLiteral = offset
+		case json.ScanArrayValue:
+			n := mustParseInt(current[len(current)-1])
+			current[len(current)-1] = strconv.Itoa(n + 1)
+		case json.ScanEndArray, json.ScanEndObject:
+			current = sliceToEnd(current)
+		case json.ScanBeginObject:
+			current = append(current, "")
+		case json.ScanContinue, json.ScanSkipSpace, json.ScanObjectValue, json.ScanEnd:
+		default:
+			return nil, fmt.Errorf("found unhandled json op: %v", newOp)
+		}
+
+		if (newOp == json.ScanBeginArray || newOp == json.ScanArrayValue ||
+			newOp == json.ScanObjectKey) && arreq(needle, current) {
+			otmp := offset
+			for isSpace(rune(data[otmp])) {
+				otmp++
+			}
+			if data[otmp] == ']' {
+				// special case an array offset miss
+				offset = otmp
+				return nil, nil
+			}
+			val, _, err := json.NextValue(data[offset:], scan)
+			return val, err
+		}
+	}
+
+	return nil, nil
+}
+
+func sliceToEnd(s []string) []string {
+	end := len(s) - 1
+	if end >= 0 {
+		s = s[:end]
+	}
+	return s
+
+}
+
+func mustParseInt(s string) int {
+	n, err := strconv.Atoi(s)
+	if err == nil {
+		return n
+	}
+	panic(err)
+}
+
+// ListPointers lists all possible pointers from the given input.
+func ListPointers(data []byte) ([]string, error) {
+	if len(data) == 0 {
+		return nil, fmt.Errorf("Invalid JSON")
+	}
+	rv := []string{""}
+
+	scan := &json.Scanner{}
+	scan.Reset()
+
+	offset := 0
+	beganLiteral := 0
+	var current []string
+	for {
+		if offset >= len(data) {
+			return rv, nil
+		}
+		newOp := scan.Step(scan, int(data[offset]))
+		offset++
+
+		switch newOp {
+		case json.ScanBeginArray:
+			current = append(current, "0")
+		case json.ScanObjectKey:
+			current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1])
+		case json.ScanBeginLiteral:
+			beganLiteral = offset
+		case json.ScanArrayValue:
+			n := mustParseInt(current[len(current)-1])
+			current[len(current)-1] = strconv.Itoa(n + 1)
+		case json.ScanEndArray, json.ScanEndObject:
+			current = sliceToEnd(current)
+		case json.ScanBeginObject:
+			current = append(current, "")
+		case json.ScanError:
+			return nil, fmt.Errorf("Error reading JSON object at offset %v", offset)
+		}
+
+		if newOp == json.ScanBeginArray || newOp == json.ScanArrayValue ||
+			newOp == json.ScanObjectKey {
+			rv = append(rv, encodePointer(current))
+		}
+	}
+}
+
+// FindMany finds several jsonpointers in one pass through the input.
+func FindMany(data []byte, paths []string) (map[string][]byte, error) {
+	tpaths := make([]string, 0, len(paths))
+	m := map[string][]byte{}
+	for _, p := range paths {
+		if p == "" {
+			m[p] = data
+		} else {
+			tpaths = append(tpaths, p)
+		}
+	}
+	sort.Strings(tpaths)
+
+	scan := &json.Scanner{}
+	scan.Reset()
+
+	offset := 0
+	todo := len(tpaths)
+	beganLiteral := 0
+	matchedAt := 0
+	var current []string
+	for todo > 0 {
+		if offset >= len(data) {
+			break
+		}
+		newOp := scan.Step(scan, int(data[offset]))
+		offset++
+
+		switch newOp {
+		case json.ScanBeginArray:
+			current = append(current, "0")
+		case json.ScanObjectKey:
+			current[len(current)-1] = grokLiteral(data[beganLiteral-1 : offset-1])
+		case json.ScanBeginLiteral:
+			beganLiteral = offset
+		case json.ScanArrayValue:
+			n := mustParseInt(current[len(current)-1])
+			current[len(current)-1] = strconv.Itoa(n + 1)
+		case json.ScanEndArray, json.ScanEndObject:
+			current = sliceToEnd(current)
+		case json.ScanBeginObject:
+			current = append(current, "")
+		}
+
+		if newOp == json.ScanBeginArray || newOp == json.ScanArrayValue ||
+			newOp == json.ScanObjectKey {
+
+			if matchedAt < len(current)-1 {
+				continue
+			}
+			if matchedAt > len(current) {
+				matchedAt = len(current)
+			}
+
+			currentStr := encodePointer(current)
+			off := sort.SearchStrings(tpaths, currentStr)
+			if off < len(tpaths) {
+				// Check to see if the path we're
+				// going down could even lead to a
+				// possible match.
+				if strings.HasPrefix(tpaths[off], currentStr) {
+					matchedAt++
+				}
+				// And if it's not an exact match, keep parsing.
+				if tpaths[off] != currentStr {
+					continue
+				}
+			} else {
+				// Fell of the end of the list, no possible match
+				continue
+			}
+
+			// At this point, we have an exact match, so grab it.
+			stmp := &json.Scanner{}
+			val, _, err := json.NextValue(data[offset:], stmp)
+			if err != nil {
+				return m, err
+			}
+			m[currentStr] = val
+			todo--
+		}
+	}
+
+	return m, nil
+}

+ 2 - 0
vendor/github.com/dustin/go-jsonpointer/doc.go

@@ -0,0 +1,2 @@
+// Package jsonpointer implements RFC6901 JSON Pointers
+package jsonpointer

+ 38 - 0
vendor/github.com/dustin/go-jsonpointer/map.go

@@ -0,0 +1,38 @@
+package jsonpointer
+
+import (
+	"strconv"
+	"strings"
+)
+
+// Get the value at the specified path.
+func Get(m map[string]interface{}, path string) interface{} {
+	if path == "" {
+		return m
+	}
+
+	parts := strings.Split(path[1:], "/")
+	var rv interface{} = m
+
+	for _, p := range parts {
+		switch v := rv.(type) {
+		case map[string]interface{}:
+			if strings.Contains(p, "~") {
+				p = strings.Replace(p, "~1", "/", -1)
+				p = strings.Replace(p, "~0", "~", -1)
+			}
+			rv = v[p]
+		case []interface{}:
+			i, err := strconv.Atoi(p)
+			if err == nil && i < len(v) {
+				rv = v[i]
+			} else {
+				return nil
+			}
+		default:
+			return nil
+		}
+	}
+
+	return rv
+}

+ 171 - 0
vendor/github.com/dustin/go-jsonpointer/reflect.go

@@ -0,0 +1,171 @@
+package jsonpointer
+
+import (
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// Reflect gets the value at the specified path from a struct.
+func Reflect(o interface{}, path string) interface{} {
+	if path == "" {
+		return o
+	}
+
+	parts := parsePointer(path)
+	var rv interface{} = o
+
+OUTER:
+	for _, p := range parts {
+		val := reflect.ValueOf(rv)
+		if val.Kind() == reflect.Ptr {
+			val = val.Elem()
+		}
+
+		if val.Kind() == reflect.Struct {
+			typ := val.Type()
+			for i := 0; i < typ.NumField(); i++ {
+				sf := typ.Field(i)
+				tag := sf.Tag.Get("json")
+				name := parseJSONTagName(tag)
+				if (name != "" && name == p) || sf.Name == p {
+					rv = val.Field(i).Interface()
+					continue OUTER
+				}
+			}
+			// Found no matching field.
+			return nil
+		} else if val.Kind() == reflect.Map {
+			// our pointer always gives us a string key
+			// here we try to convert it into the correct type
+			mapKey, canConvert := makeMapKeyFromString(val.Type().Key(), p)
+			if canConvert {
+				field := val.MapIndex(mapKey)
+				if field.IsValid() {
+					rv = field.Interface()
+				} else {
+					return nil
+				}
+			} else {
+				return nil
+			}
+		} else if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+			i, err := strconv.Atoi(p)
+			if err == nil && i < val.Len() {
+				rv = val.Index(i).Interface()
+			} else {
+				return nil
+			}
+		} else {
+			return nil
+		}
+	}
+
+	return rv
+}
+
+// ReflectListPointers lists all possible pointers from the given struct.
+func ReflectListPointers(o interface{}) ([]string, error) {
+	return reflectListPointersRecursive(o, ""), nil
+}
+
+func reflectListPointersRecursive(o interface{}, prefix string) []string {
+	rv := []string{prefix + ""}
+
+	val := reflect.ValueOf(o)
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+
+	if val.Kind() == reflect.Struct {
+
+		typ := val.Type()
+		for i := 0; i < typ.NumField(); i++ {
+			child := val.Field(i).Interface()
+			sf := typ.Field(i)
+			tag := sf.Tag.Get("json")
+			name := parseJSONTagName(tag)
+			if name != "" {
+				// use the tag name
+				childReults := reflectListPointersRecursive(child, prefix+encodePointer([]string{name}))
+				rv = append(rv, childReults...)
+			} else {
+				// use the original field name
+				childResults := reflectListPointersRecursive(child, prefix+encodePointer([]string{sf.Name}))
+				rv = append(rv, childResults...)
+			}
+		}
+
+	} else if val.Kind() == reflect.Map {
+		for _, k := range val.MapKeys() {
+			child := val.MapIndex(k).Interface()
+			mapKeyName := makeMapKeyName(k)
+			childReults := reflectListPointersRecursive(child, prefix+encodePointer([]string{mapKeyName}))
+			rv = append(rv, childReults...)
+		}
+	} else if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
+		for i := 0; i < val.Len(); i++ {
+			child := val.Index(i).Interface()
+			childResults := reflectListPointersRecursive(child, prefix+encodePointer([]string{strconv.Itoa(i)}))
+			rv = append(rv, childResults...)
+		}
+	}
+	return rv
+}
+
+// makeMapKeyName takes a map key value and creates a string representation
+func makeMapKeyName(v reflect.Value) string {
+	switch v.Kind() {
+	case reflect.Float32, reflect.Float64:
+		fv := v.Float()
+		return strconv.FormatFloat(fv, 'f', -1, v.Type().Bits())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		iv := v.Int()
+		return strconv.FormatInt(iv, 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		iv := v.Uint()
+		return strconv.FormatUint(iv, 10)
+	default:
+		return v.String()
+	}
+}
+
+// makeMapKeyFromString takes the key type for a map, and a string
+// representing the key, it then tries to convert the string
+// representation into a value of the correct type.
+func makeMapKeyFromString(mapKeyType reflect.Type, pointer string) (reflect.Value, bool) {
+	valp := reflect.New(mapKeyType)
+	val := reflect.Indirect(valp)
+	switch mapKeyType.Kind() {
+	case reflect.String:
+		return reflect.ValueOf(pointer), true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		iv, err := strconv.ParseInt(pointer, 10, mapKeyType.Bits())
+		if err == nil {
+			val.SetInt(iv)
+			return val, true
+		}
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		iv, err := strconv.ParseUint(pointer, 10, mapKeyType.Bits())
+		if err == nil {
+			val.SetUint(iv)
+			return val, true
+		}
+	case reflect.Float32, reflect.Float64:
+		fv, err := strconv.ParseFloat(pointer, mapKeyType.Bits())
+		if err == nil {
+			val.SetFloat(fv)
+			return val, true
+		}
+	}
+
+	return reflect.ValueOf(nil), false
+}
+
+// parseJSONTagName extracts the JSON field name from a struct tag
+func parseJSONTagName(tag string) string {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx]
+	}
+	return tag
+}

+ 2 - 0
vendor/github.com/dustin/gojson/.gitignore

@@ -0,0 +1,2 @@
+#*
+*~

+ 27 - 0
vendor/github.com/dustin/gojson/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 1089 - 0
vendor/github.com/dustin/gojson/decode.go

@@ -0,0 +1,1089 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null.  In that case, Unmarshal sets
+// the pointer to nil.  Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer.  If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshalling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value string       // description of JSON value - "bool", "array", "number -5"
+	Type  reflect.Type // type of Go value it could not be assigned to
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+	}
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
+	}
+
+	d.scan.Reset()
+	// We decode rv not rv.Elem because the Unmarshaler interface
+	// test must be applied at the top level of the value.
+	d.value(rv)
+	return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+	data       []byte
+	off        int // read offset in data
+	scan       Scanner
+	nextscan   Scanner // for calls to NextValue
+	savedError error
+	useNumber  bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+	d.data = data
+	d.off = 0
+	d.savedError = nil
+	return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+	panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+	if d.savedError == nil {
+		d.savedError = err
+	}
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+	c := d.data[d.off]
+	item, rest, err := NextValue(d.data[d.off:], &d.nextscan)
+	if err != nil {
+		d.error(err)
+	}
+	d.off = len(d.data) - len(rest)
+
+	// Our scanner has seen the opening brace/bracket
+	// and thinks we're still in the middle of the object.
+	// invent a closing brace/bracket to get it out.
+	if c == '{' {
+		d.scan.Step(&d.scan, '}')
+	} else {
+		d.scan.Step(&d.scan, ']')
+	}
+
+	return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+	var newOp int
+	for {
+		if d.off >= len(d.data) {
+			newOp = d.scan.EOF()
+			d.off = len(d.data) + 1 // mark processed EOF with len+1
+		} else {
+			c := int(d.data[d.off])
+			d.off++
+			newOp = d.scan.Step(&d.scan, c)
+		}
+		if newOp != op {
+			break
+		}
+	}
+	return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+	if !v.IsValid() {
+		_, rest, err := NextValue(d.data[d.off:], &d.nextscan)
+		if err != nil {
+			d.error(err)
+		}
+		d.off = len(d.data) - len(rest)
+
+		// d.scan thinks we're still at the beginning of the item.
+		// Feed in an empty string - the shortest, simplest value -
+		// so that it knows we got to the end of the value.
+		if d.scan.redo {
+			// rewind.
+			d.scan.redo = false
+			d.scan.Step = stateBeginValue
+		}
+		d.scan.Step(&d.scan, '"')
+		d.scan.Step(&d.scan, '"')
+
+		n := len(d.scan.parseState)
+		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+			// d.scan thinks we just read an object key; finish the object
+			d.scan.Step(&d.scan, ':')
+			d.scan.Step(&d.scan, '"')
+			d.scan.Step(&d.scan, '"')
+			d.scan.Step(&d.scan, '}')
+		}
+
+		return
+	}
+
+	switch op := d.scanWhile(ScanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case ScanBeginArray:
+		d.array(v)
+
+	case ScanBeginObject:
+		d.object(v)
+
+	case ScanBeginLiteral:
+		d.literal(v)
+	}
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+	switch op := d.scanWhile(ScanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case ScanBeginArray:
+		d.array(reflect.Value{})
+
+	case ScanBeginObject:
+		d.object(reflect.Value{})
+
+	case ScanBeginLiteral:
+		switch v := d.literalInterface().(type) {
+		case nil, string:
+			return v
+		}
+	}
+	return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"array", v.Type()})
+		d.off--
+		d.next()
+		return
+	}
+
+	v = pv
+
+	// Check type of target.
+	switch v.Kind() {
+	case reflect.Interface:
+		if v.NumMethod() == 0 {
+			// Decoding into nil interface?  Switch to non-reflect code.
+			v.Set(reflect.ValueOf(d.arrayInterface()))
+			return
+		}
+		// Otherwise it's invalid.
+		fallthrough
+	default:
+		d.saveError(&UnmarshalTypeError{"array", v.Type()})
+		d.off--
+		d.next()
+		return
+	case reflect.Array:
+	case reflect.Slice:
+		break
+	}
+
+	i := 0
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(ScanSkipSpace)
+		if op == ScanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		// Get element of array, growing if necessary.
+		if v.Kind() == reflect.Slice {
+			// Grow slice if necessary
+			if i >= v.Cap() {
+				newcap := v.Cap() + v.Cap()/2
+				if newcap < 4 {
+					newcap = 4
+				}
+				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+				reflect.Copy(newv, v)
+				v.Set(newv)
+			}
+			if i >= v.Len() {
+				v.SetLen(i + 1)
+			}
+		}
+
+		if i < v.Len() {
+			// Decode into element.
+			d.value(v.Index(i))
+		} else {
+			// Ran out of fixed array: skip.
+			d.value(reflect.Value{})
+		}
+		i++
+
+		// Next token must be , or ].
+		op = d.scanWhile(ScanSkipSpace)
+		if op == ScanEndArray {
+			break
+		}
+		if op != ScanArrayValue {
+			d.error(errPhase)
+		}
+	}
+
+	if i < v.Len() {
+		if v.Kind() == reflect.Array {
+			// Array.  Zero the rest.
+			z := reflect.Zero(v.Type().Elem())
+			for ; i < v.Len(); i++ {
+				v.Index(i).Set(z)
+			}
+		} else {
+			v.SetLen(i)
+		}
+	}
+	if i == 0 && v.Kind() == reflect.Slice {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+	}
+}
+
+var nullLiteral = []byte("null")
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type()})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		v.Set(reflect.ValueOf(d.objectInterface()))
+		return
+	}
+
+	// Check type of target: struct or map[string]T
+	switch v.Kind() {
+	case reflect.Map:
+		// map must have string kind
+		t := v.Type()
+		if t.Key().Kind() != reflect.String {
+			d.saveError(&UnmarshalTypeError{"object", v.Type()})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type()})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	var mapElem reflect.Value
+
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(ScanSkipSpace)
+		if op == ScanEndObject {
+			// closing } - can only happen on first iteration.
+			break
+		}
+		if op != ScanBeginLiteral {
+			d.error(errPhase)
+		}
+
+		// Read key.
+		start := d.off - 1
+		op = d.scanWhile(ScanContinue)
+		item := d.data[start : d.off-1]
+		key, ok := UnquoteBytes(item)
+		if !ok {
+			d.error(errPhase)
+		}
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read : before value.
+		if op == ScanSkipSpace {
+			op = d.scanWhile(ScanSkipSpace)
+		}
+		if op != ScanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", item, v.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kv := reflect.ValueOf(key).Convert(v.Type().Key())
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or }.
+		op = d.scanWhile(ScanSkipSpace)
+		if op == ScanEndObject {
+			break
+		}
+		if op != ScanObjectValue {
+			d.error(errPhase)
+		}
+	}
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(ScanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+
+	d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+	if d.useNumber {
+		return Number(s), nil
+	}
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)}
+	}
+	return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		//Empty string given
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type()})
+			}
+		}
+		s, ok := UnquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+			}
+		}
+
+	case '"': // string
+		s, ok := UnquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type()})
+		case reflect.Slice:
+			if v.Type() != byteSliceType {
+				d.saveError(&UnmarshalTypeError{"string", v.Type()})
+				break
+			}
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+			n, err := base64.StdEncoding.Decode(b, s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			v.Set(reflect.ValueOf(b[0:n]))
+		case reflect.String:
+			v.SetString(string(s))
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(string(s)))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type()})
+			}
+		}
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				v.SetString(s)
+				break
+			}
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(&UnmarshalTypeError{"number", v.Type()})
+			}
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{"number", v.Type()})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface.  They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+	switch d.scanWhile(ScanSkipSpace) {
+	default:
+		d.error(errPhase)
+		panic("unreachable")
+	case ScanBeginArray:
+		return d.arrayInterface()
+	case ScanBeginObject:
+		return d.objectInterface()
+	case ScanBeginLiteral:
+		return d.literalInterface()
+	}
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(ScanSkipSpace)
+		if op == ScanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		op = d.scanWhile(ScanSkipSpace)
+		if op == ScanEndArray {
+			break
+		}
+		if op != ScanArrayValue {
+			d.error(errPhase)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+	m := make(map[string]interface{})
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(ScanSkipSpace)
+		if op == ScanEndObject {
+			// closing } - can only happen on first iteration.
+			break
+		}
+		if op != ScanBeginLiteral {
+			d.error(errPhase)
+		}
+
+		// Read string key.
+		start := d.off - 1
+		op = d.scanWhile(ScanContinue)
+		item := d.data[start : d.off-1]
+		key, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+
+		// Read : before value.
+		if op == ScanSkipSpace {
+			op = d.scanWhile(ScanSkipSpace)
+		}
+		if op != ScanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		op = d.scanWhile(ScanSkipSpace)
+		if op == ScanEndObject {
+			break
+		}
+		if op != ScanObjectValue {
+			d.error(errPhase)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(ScanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+	item := d.data[start:d.off]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			d.error(errPhase)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = UnquoteBytes(s)
+	t = string(s)
+	return
+}
+
+func UnquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		s = bytes.TrimSpace(s)
+
+		if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+			return
+		}
+	}
+
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room?  Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}

+ 1183 - 0
vendor/github.com/dustin/gojson/encode.go

@@ -0,0 +1,1183 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// http://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON.  The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+//   - the field's tag is "-", or
+//   - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// or integer types. This extra level of encoding is sometimes used when
+// communicating with JavaScript programs:
+//
+//    Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the object keys are used directly
+// as map keys.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them.  Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
// so that the JSON will be safe to embed inside HTML <script> tags.
// For historical reasons, web browsers don't honor standard HTML
// escaping within <script> tags, so an alternative JSON encoding must
// be used.
func HTMLEscape(dst *bytes.Buffer, src []byte) {
	// The characters can only appear in string literals,
	// so just scan the string one byte at a time.
	// start marks the beginning of the current run of bytes that can
	// be copied through unmodified.
	start := 0
	for i, c := range src {
		if c == '<' || c == '>' || c == '&' {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u00`)
			dst.WriteByte(hex[c>>4])
			dst.WriteByte(hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		// The &^1 masks off the low bit so a single compare matches
		// both the 0xA8 and 0xA9 final bytes.
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u202`)
			dst.WriteByte(hex[src[i+2]&0xF])
			start = i + 3
		}
	}
	// Flush any trailing run of unescaped bytes.
	if start < len(src) {
		dst.Write(src[start:])
	}
}
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+	Value reflect.Value
+	Str   string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+	S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+	Type reflect.Type
+	Err  error
+}
+
+func (e *MarshalerError) Error() string {
+	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
// An encodeState encodes JSON into a bytes.Buffer.
type encodeState struct {
	bytes.Buffer          // accumulated output
	scratch      [64]byte // scratch space for number formatting
}

// encodeStatePool recycles encodeStates to cut allocation churn.
var encodeStatePool sync.Pool

// newEncodeState returns a ready-to-use encodeState, reusing a pooled
// one (reset) when available.
func newEncodeState() *encodeState {
	v := encodeStatePool.Get()
	if v == nil {
		return new(encodeState)
	}
	e := v.(*encodeState)
	e.Reset()
	return e
}
+
// marshal encodes v into e's buffer. Encoding failures are signaled
// internally by panicking with an error (see e.error); this recover
// converts that panic back into a returned error. Genuine runtime
// errors and string panics are re-raised so real bugs still crash.
func (e *encodeState) marshal(v interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}
			if s, ok := r.(string); ok {
				panic(s)
			}
			// Anything else must be an error raised via e.error.
			err = r.(error)
		}
	}()
	e.reflectValue(reflect.ValueOf(v))
	return nil
}
+
// error aborts the encoding by panicking with err; (*encodeState).marshal
// recovers the panic and returns err to the caller.
func (e *encodeState) error(err error) {
	panic(err)
}

// byteSliceType is cached for the []byte special case ([]byte encodes
// as a base64 string rather than a JSON array).
var byteSliceType = reflect.TypeOf([]byte(nil))
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
// reflectValue looks up (or builds) the encoder for v's type and runs it.
func (e *encodeState) reflectValue(v reflect.Value) {
	valueEncoder(v)(e, v, false)
}

// encoderFunc writes the JSON encoding of v to e. quoted requests that
// the value be wrapped in quotes (the ",string" struct-tag option).
type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)

// encoderCache memoizes the encoderFunc built for each type.
var encoderCache struct {
	sync.RWMutex
	m map[reflect.Type]encoderFunc
}

// valueEncoder returns the encoder for v's type; an invalid (zero)
// reflect.Value encodes as JSON null.
func valueEncoder(v reflect.Value) encoderFunc {
	if !v.IsValid() {
		return invalidValueEncoder
	}
	return typeEncoder(v.Type())
}
+
// typeEncoder returns the cached encoder for t, building and caching
// one on first use.
func typeEncoder(t reflect.Type) encoderFunc {
	encoderCache.RLock()
	f := encoderCache.m[t]
	encoderCache.RUnlock()
	if f != nil {
		return f
	}

	// To deal with recursive types, populate the map with an
	// indirect func before we build it. This type waits on the
	// real func (f) to be ready and then calls it.  This indirect
	// func is only used for recursive types.
	encoderCache.Lock()
	if encoderCache.m == nil {
		encoderCache.m = make(map[reflect.Type]encoderFunc)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	// Placeholder closure: blocks until f is assigned below, then
	// delegates. Only ever invoked if t refers back to itself.
	encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
		wg.Wait()
		f(e, v, quoted)
	}
	encoderCache.Unlock()

	// Compute the real encoder without holding the lock.
	// Might duplicate effort but won't hold other computations back.
	f = newTypeEncoder(t, true)
	wg.Done()
	encoderCache.Lock()
	encoderCache.m[t] = f
	encoderCache.Unlock()
	return f
}
+
+var (
+	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
+	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
// newTypeEncoder constructs an encoderFunc for a type.
// The returned encoder only checks CanAddr when allowAddr is true.
// Marshaler takes precedence over encoding.TextMarshaler, and a
// value-receiver implementation takes precedence over one on the
// pointer type. The recursive calls pass allowAddr=false so the
// addressable check is performed at most once per type.
func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
	if t.Implements(marshalerType) {
		return marshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(marshalerType) {
			return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
		}
	}

	if t.Implements(textMarshalerType) {
		return textMarshalerEncoder
	}
	if t.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(t).Implements(textMarshalerType) {
			return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
		}
	}

	// No marshaler interfaces: dispatch on the reflection kind.
	switch t.Kind() {
	case reflect.Bool:
		return boolEncoder
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intEncoder
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintEncoder
	case reflect.Float32:
		return float32Encoder
	case reflect.Float64:
		return float64Encoder
	case reflect.String:
		return stringEncoder
	case reflect.Interface:
		return interfaceEncoder
	case reflect.Struct:
		return newStructEncoder(t)
	case reflect.Map:
		return newMapEncoder(t)
	case reflect.Slice:
		return newSliceEncoder(t)
	case reflect.Array:
		return newArrayEncoder(t)
	case reflect.Ptr:
		return newPtrEncoder(t)
	default:
		// chan, func, complex, unsafe pointer: not representable.
		return unsupportedTypeEncoder
	}
}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, true)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, true)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err == nil {
+		_, err = e.stringBytes(b)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err == nil {
+		_, err = e.stringBytes(b)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	if quoted {
+		e.WriteByte('"')
+	}
+	if v.Bool() {
+		e.WriteString("true")
+	} else {
+		e.WriteString("false")
+	}
+	if quoted {
+		e.WriteByte('"')
+	}
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+	if quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if quoted {
+		e.WriteByte('"')
+	}
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+	if quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if quoted {
+		e.WriteByte('"')
+	}
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+	f := v.Float()
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+	}
+	b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+	if quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if quoted {
+		e.WriteByte('"')
+	}
+}
+
+var (
+	float32Encoder = (floatEncoder(32)).encode
+	float64Encoder = (floatEncoder(64)).encode
+)
+
// stringEncoder writes a Go string as a JSON string. Values whose type
// is numberType (declared on the decode side of this package) are
// written verbatim, unquoted — presumably a Number holds a validated
// numeric literal; TODO confirm against the decoder.
func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
	if v.Type() == numberType {
		numStr := v.String()
		if numStr == "" {
			numStr = "0" // Number's zero-val
		}
		e.WriteString(numStr)
		return
	}
	if quoted {
		// ",string" option: JSON-encode the string itself, then emit
		// that encoding as the contents of an outer JSON string.
		sb, err := Marshal(v.String())
		if err != nil {
			e.error(err)
		}
		e.string(string(sb))
	} else {
		e.string(v.String())
	}
}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+	e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+	fields    []field
+	fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+	e.WriteByte('{')
+	first := true
+	for i, f := range se.fields {
+		fv := fieldByIndex(v, f.index)
+		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+			continue
+		}
+		if first {
+			first = false
+		} else {
+			e.WriteByte(',')
+		}
+		e.string(f.name)
+		e.WriteByte(':')
+		se.fieldEncs[i](e, fv, f.quoted)
+	}
+	e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+	fields := cachedTypeFields(t)
+	se := &structEncoder{
+		fields:    fields,
+		fieldEncs: make([]encoderFunc, len(fields)),
+	}
+	for i, f := range fields {
+		se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+	}
+	return se.encode
+}
+
+type mapEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.WriteByte('{')
+	var sv stringValues = v.MapKeys()
+	sort.Sort(sv)
+	for i, k := range sv {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		e.string(k.String())
+		e.WriteByte(':')
+		me.elemEnc(e, v.MapIndex(k), false)
+	}
+	e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+	if t.Key().Kind() != reflect.String {
+		return unsupportedTypeEncoder
+	}
+	me := &mapEncoder{typeEncoder(t.Elem())}
+	return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	s := v.Bytes()
+	e.WriteByte('"')
+	if len(s) < 1024 {
+		// for small buffers, using Encode directly is much faster.
+		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+		base64.StdEncoding.Encode(dst, s)
+		e.Write(dst)
+	} else {
+		// for large buffers, avoid unnecessary extra temporary
+		// buffer space.
+		enc := base64.NewEncoder(base64.StdEncoding, e)
+		enc.Write(s)
+		enc.Close()
+	}
+	e.WriteByte('"')
+}
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+	arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+	// Byte slices get special treatment; arrays don't.
+	if t.Elem().Kind() == reflect.Uint8 {
+		return encodeByteSlice
+	}
+	enc := &sliceEncoder{newArrayEncoder(t)}
+	return enc.encode
+}
+
+type arrayEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+	e.WriteByte('[')
+	n := v.Len()
+	for i := 0; i < n; i++ {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		ae.elemEnc(e, v.Index(i), false)
+	}
+	e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+	enc := &arrayEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type ptrEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	pe.elemEnc(e, v.Elem(), quoted)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+	enc := &ptrEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type condAddrEncoder struct {
+	canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+	if v.CanAddr() {
+		ce.canAddrEnc(e, v, quoted)
+	} else {
+		ce.elseEnc(e, v, quoted)
+	}
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return enc.encode
+}
+
// isValidTag reports whether s is usable as a JSON object key taken
// from a struct tag: non-empty, containing only Unicode letters,
// digits, and a fixed set of punctuation characters.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, r := range s {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			continue
		}
		// Backslash and quote chars are reserved, but otherwise any
		// punctuation from this set (and space) is allowed in a tag name.
		if !strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", r) {
			return false
		}
	}
	return true
}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr {
+			if v.IsNil() {
+				return reflect.Value{}
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+	for _, i := range index {
+		if t.Kind() == reflect.Ptr {
+			t = t.Elem()
+		}
+		t = t.Field(i).Type
+	}
+	return t
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int           { return len(sv) }
+func (sv stringValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string   { return sv[i].String() }
+
// NOTE: keep in sync with stringBytes below.
// string writes s to the buffer as a quoted, escaped JSON string and
// returns the number of bytes written (never an error). `start` marks
// the beginning of the current run of bytes that need no escaping, so
// safe runs are flushed with a single WriteString.
func (e *encodeState) string(s string) (int, error) {
	len0 := e.Len()
	e.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			// Fast path: printable ASCII that needs no escaping.
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				e.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			case '\n':
				e.WriteByte('\\')
				e.WriteByte('n')
			case '\r':
				e.WriteByte('\\')
				e.WriteByte('r')
			case '\t':
				e.WriteByte('\\')
				e.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n, \r and \t,
				// as well as <, > and &. The latter are escaped because they
				// can lead to security holes when user-controlled strings
				// are rendered into JSON and served to some browsers.
				e.WriteString(`\u00`)
				e.WriteByte(hex[b>>4])
				e.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		// Invalid UTF-8 byte: coerce to the replacement rune.
		if c == utf8.RuneError && size == 1 {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				e.WriteString(s[start:i])
			}
			e.WriteString(`\u202`)
			e.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	// Flush the final unescaped run.
	if start < len(s) {
		e.WriteString(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0, nil
}
+
// NOTE: keep in sync with string above.
// stringBytes is the []byte twin of string: it writes s as a quoted,
// escaped JSON string and returns the number of bytes written.
func (e *encodeState) stringBytes(s []byte) (int, error) {
	len0 := e.Len()
	e.WriteByte('"')
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			// Fast path: printable ASCII that needs no escaping.
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			if start < i {
				e.Write(s[start:i])
			}
			switch b {
			case '\\', '"':
				e.WriteByte('\\')
				e.WriteByte(b)
			case '\n':
				e.WriteByte('\\')
				e.WriteByte('n')
			case '\r':
				e.WriteByte('\\')
				e.WriteByte('r')
			case '\t':
				e.WriteByte('\\')
				e.WriteByte('t')
			default:
				// This encodes bytes < 0x20 except for \n, \r and \t,
				// as well as <, >, and &. The latter are escaped because they
				// can lead to security holes when user-controlled strings
				// are rendered into JSON and served to some browsers.
				e.WriteString(`\u00`)
				e.WriteByte(hex[b>>4])
				e.WriteByte(hex[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(s[i:])
		// Invalid UTF-8 byte: coerce to the replacement rune.
		if c == utf8.RuneError && size == 1 {
			if start < i {
				e.Write(s[start:i])
			}
			e.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				e.Write(s[start:i])
			}
			e.WriteString(`\u202`)
			e.WriteByte(hex[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	// Flush the final unescaped run.
	if start < len(s) {
		e.Write(s[start:])
	}
	e.WriteByte('"')
	return e.Len() - len0, nil
}
+
// A field represents a single field found in a struct.
type field struct {
	name      string                 // JSON key: tag name, or the Go field name
	nameBytes []byte                 // []byte(name)
	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

	tag       bool         // true if name came from a JSON struct tag
	index     []int        // index path from the root struct to this field
	typ       reflect.Type // field type (pointer to unnamed type followed)
	omitEmpty bool         // ",omitempty" option present
	quoted    bool         // ",string" option present
}

// fillField precomputes the derived members of f (nameBytes and the
// case-folding comparator used for key matching) and returns it.
func fillField(f field) field {
	f.nameBytes = []byte(f.name)
	f.equalFold = foldFunc(f.nameBytes)
	return f
}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		// Advance one BFS level, reusing the previous level's slice.
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				// parseTag (defined elsewhere in this package) splits
				// the name from the comma-separated options.
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    opts.Contains("string"),
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			// Deeper than the shallowest level: hidden, drop the rest.
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}

+ 143 - 0
vendor/github.com/dustin/gojson/fold.go

@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
// Constants used by the case-folding comparison functions below.
const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'    // 'K' Kelvin sign; case-folds with 'k'/'K'.
	smallLongEss = '\u017f'    // 'ſ' Latin small letter long s; case-folds with 's'/'S'.
)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}

+ 137 - 0
vendor/github.com/dustin/gojson/indent.go

@@ -0,0 +1,137 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
	return compact(dst, src, false)
}

// compact copies src into dst with whitespace elided. When escape is
// true it additionally escapes '<', '>', '&' and the line separators
// U+2028/U+2029 so the output is safe to embed in HTML/JS contexts.
// On a scan error dst is truncated back to its original length.
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
	origLen := dst.Len()
	var scan Scanner
	scan.Reset()
	start := 0
	for i, c := range src {
		if escape && (c == '<' || c == '>' || c == '&') {
			if start < i {
				dst.Write(src[start:i])
			}
			// hex is the hex-digit table declared elsewhere in this package.
			dst.WriteString(`\u00`)
			dst.WriteByte(hex[c>>4])
			dst.WriteByte(hex[c&0xF])
			start = i + 1
		}
		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
		// The &^1 trick matches both 0xA8 and 0xA9 in one comparison.
		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
			if start < i {
				dst.Write(src[start:i])
			}
			dst.WriteString(`\u202`)
			dst.WriteByte(hex[src[i+2]&0xF])
			start = i + 3
		}
		v := scan.Step(&scan, int(c))
		if v >= ScanSkipSpace {
			if v == ScanError {
				break
			}
			// Flush everything before this space byte, then skip it.
			if start < i {
				dst.Write(src[start:i])
			}
			start = i + 1
		}
	}
	if scan.EOF() == ScanError {
		dst.Truncate(origLen)
		return scan.Err
	}
	if start < len(src) {
		dst.Write(src[start:])
	}
	return nil
}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+	dst.WriteByte('\n')
+	dst.WriteString(prefix)
+	for i := 0; i < depth; i++ {
+		dst.WriteString(indent)
+	}
+}
+
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, and has no trailing newline, to make it
// easier to embed inside other formatted JSON data.
// On a scan error dst is truncated back to its original length.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
	origLen := dst.Len()
	var scan Scanner
	scan.Reset()
	needIndent := false
	depth := 0
	for _, c := range src {
		scan.bytes++
		v := scan.Step(&scan, int(c))
		if v == ScanSkipSpace {
			continue
		}
		if v == ScanError {
			break
		}
		if needIndent && v != ScanEndObject && v != ScanEndArray {
			needIndent = false
			depth++
			newline(dst, prefix, indent, depth)
		}

		// Emit semantically uninteresting bytes
		// (in particular, punctuation in strings) unmodified.
		if v == ScanContinue {
			dst.WriteByte(c)
			continue
		}

		// Add spacing around real punctuation.
		switch c {
		case '{', '[':
			// delay indent so that empty object and array are formatted as {} and [].
			needIndent = true
			dst.WriteByte(c)

		case ',':
			dst.WriteByte(c)
			newline(dst, prefix, indent, depth)

		case ':':
			dst.WriteByte(c)
			dst.WriteByte(' ')

		case '}', ']':
			if needIndent {
				// suppress indent in empty object/array
				needIndent = false
			} else {
				depth--
				newline(dst, prefix, indent, depth)
			}
			dst.WriteByte(c)

		default:
			dst.WriteByte(c)
		}
	}
	if scan.EOF() == ScanError {
		dst.Truncate(origLen)
		return scan.Err
	}
	return nil
}

+ 629 - 0
vendor/github.com/dustin/gojson/scanner.go

@@ -0,0 +1,629 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, NextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation;
// it is Reset before use, so any prior state is discarded.
func checkValid(data []byte, scan *Scanner) error {
	scan.Reset()
	for _, c := range data {
		scan.bytes++
		if scan.Step(scan, int(c)) == ScanError {
			return scan.Err
		}
	}
	// A final EOF check catches truncated values such as `{"a":`.
	if scan.EOF() == ScanError {
		return scan.Err
	}
	return nil
}
+
+// Validate some alleged JSON.  Return nil iff the JSON is valid.
+func Validate(data []byte) error {
+	s := &Scanner{}
+	return checkValid(data, s)
+}
+
// NextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by NextValue to avoid an allocation.
// If data holds no complete value but is a valid prefix, all of data
// is returned as the value with nil rest.
func NextValue(data []byte, scan *Scanner) (value, rest []byte, err error) {
	scan.Reset()
	for i, c := range data {
		v := scan.Step(scan, int(c))
		if v >= ScanEnd {
			switch v {
			case ScanError:
				return nil, nil, scan.Err
			case ScanEnd:
				// ScanEnd fires on the byte *after* the value.
				return data[0:i], data[i:], nil
			}
		}
	}
	if scan.EOF() == ScanError {
		return nil, nil, scan.Err
	}
	return data, nil, nil
}

// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
	msg    string // description of error
	Offset int64  // error occurred after reading Offset bytes
}

// Error implements the error interface.
func (e *SyntaxError) Error() string { return e.msg }

// A Scanner is a JSON scanning state machine.
// Callers call scan.Reset() and then pass bytes in one at a time
// by calling scan.Step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value ScanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in.  (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type Scanner struct {
	// The step is a func to be called to execute the next transition.
	// Also tried using an integer constant and a single func
	// with a switch, but using the func directly was 10% faster
	// on a 64-bit Mac Mini, and it's nicer to read.
	Step func(*Scanner, int) int

	// Reached end of top-level value.
	endTop bool

	// Stack of what we're in the middle of - array values, object keys, object values.
	parseState []int

	// Error that happened, if any.
	Err error

	// 1-byte redo (see undo method)
	redo      bool
	redoCode  int
	redoState func(*Scanner, int) int

	// total bytes consumed, updated by decoder.Decode
	bytes int64
}
+
// These values are returned by the state transition functions
// assigned to Scanner.Step and the method Scanner.EOF.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to Scanner.Step: if one call returns ScanError,
// every subsequent call will return ScanError too.
const (
	// Continue.
	ScanContinue     = iota // uninteresting byte
	ScanBeginLiteral        // end implied by next result != scanContinue
	ScanBeginObject         // begin object
	ScanObjectKey           // just finished object key (string)
	ScanObjectValue         // just finished non-last object value
	ScanEndObject           // end object (implies scanObjectValue if possible)
	ScanBeginArray          // begin array
	ScanArrayValue          // just finished array value
	ScanEndArray            // end array (implies scanArrayValue if possible)
	ScanSkipSpace           // space byte; can skip; known to be last "continue" result

	// Stop.
	ScanEnd   // top-level value ended *before* this byte; known to be first "stop" result
	ScanError // hit an error, Scanner.Err.
)

// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned.  If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
	parseObjectKey   = iota // parsing object key (before colon)
	parseObjectValue        // parsing object value (after colon)
	parseArrayValue         // parsing array value
)

// Reset prepares the scanner for use.
// It must be called before calling s.Step.
func (s *Scanner) Reset() {
	s.Step = stateBeginValue
	s.parseState = s.parseState[0:0]
	s.Err = nil
	s.redo = false
	s.endTop = false
}

// EOF tells the scanner that the end of input has been reached.
// It returns a scan status just as s.Step does.
func (s *Scanner) EOF() int {
	if s.Err != nil {
		return ScanError
	}
	if s.endTop {
		return ScanEnd
	}
	// Feed a synthetic space so delayed value-ends (e.g. bare numbers)
	// get a chance to complete.
	s.Step(s, ' ')
	if s.endTop {
		return ScanEnd
	}
	if s.Err == nil {
		s.Err = &SyntaxError{"unexpected end of JSON input", s.bytes}
	}
	return ScanError
}

// pushParseState pushes a new parse state p onto the parse stack.
func (s *Scanner) pushParseState(p int) {
	s.parseState = append(s.parseState, p)
}

// popParseState pops a parse state (already obtained) off the stack
// and updates s.Step accordingly.
func (s *Scanner) popParseState() {
	n := len(s.parseState) - 1
	s.parseState = s.parseState[0:n]
	s.redo = false
	if n == 0 {
		// Back at top level: the value is complete.
		s.Step = stateEndTop
		s.endTop = true
	} else {
		s.Step = stateEndValue
	}
}
+
// isSpace reports whether c is one of the four JSON whitespace
// characters (space, tab, carriage return, newline).
func isSpace(c rune) bool {
	switch c {
	case ' ', '\t', '\r', '\n':
		return true
	}
	return false
}
+
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *Scanner, c int) int {
	if c <= ' ' && isSpace(rune(c)) {
		return ScanSkipSpace
	}
	if c == ']' {
		// Empty array: treat `]` as ending the (absent) value.
		return stateEndValue(s, c)
	}
	return stateBeginValue(s, c)
}

// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *Scanner, c int) int {
	if c <= ' ' && isSpace(rune(c)) {
		return ScanSkipSpace
	}
	switch c {
	case '{':
		s.Step = stateBeginStringOrEmpty
		s.pushParseState(parseObjectKey)
		return ScanBeginObject
	case '[':
		s.Step = stateBeginValueOrEmpty
		s.pushParseState(parseArrayValue)
		return ScanBeginArray
	case '"':
		s.Step = stateInString
		return ScanBeginLiteral
	case '-':
		s.Step = stateNeg
		return ScanBeginLiteral
	case '0': // beginning of 0.123
		s.Step = state0
		return ScanBeginLiteral
	case 't': // beginning of true
		s.Step = stateT
		return ScanBeginLiteral
	case 'f': // beginning of false
		s.Step = stateF
		return ScanBeginLiteral
	case 'n': // beginning of null
		s.Step = stateN
		return ScanBeginLiteral
	}
	if '1' <= c && c <= '9' { // beginning of 1234.5
		s.Step = state1
		return ScanBeginLiteral
	}
	return s.error(c, "looking for beginning of value")
}

// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *Scanner, c int) int {
	if c <= ' ' && isSpace(rune(c)) {
		return ScanSkipSpace
	}
	if c == '}' {
		// Empty object: pretend we just finished a value so that
		// stateEndValue pops the object state.
		n := len(s.parseState)
		s.parseState[n-1] = parseObjectValue
		return stateEndValue(s, c)
	}
	return stateBeginString(s, c)
}

// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *Scanner, c int) int {
	if c <= ' ' && isSpace(rune(c)) {
		return ScanSkipSpace
	}
	if c == '"' {
		s.Step = stateInString
		return ScanBeginLiteral
	}
	return s.error(c, "looking for beginning of object key string")
}

// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *Scanner, c int) int {
	n := len(s.parseState)
	if n == 0 {
		// Completed top-level before the current byte.
		s.Step = stateEndTop
		s.endTop = true
		return stateEndTop(s, c)
	}
	if c <= ' ' && isSpace(rune(c)) {
		s.Step = stateEndValue
		return ScanSkipSpace
	}
	// Dispatch on what composite value we are inside of.
	ps := s.parseState[n-1]
	switch ps {
	case parseObjectKey:
		if c == ':' {
			s.parseState[n-1] = parseObjectValue
			s.Step = stateBeginValue
			return ScanObjectKey
		}
		return s.error(c, "after object key")
	case parseObjectValue:
		if c == ',' {
			s.parseState[n-1] = parseObjectKey
			s.Step = stateBeginString
			return ScanObjectValue
		}
		if c == '}' {
			s.popParseState()
			return ScanEndObject
		}
		return s.error(c, "after object key:value pair")
	case parseArrayValue:
		if c == ',' {
			s.Step = stateBeginValue
			return ScanArrayValue
		}
		if c == ']' {
			s.popParseState()
			return ScanEndArray
		}
		return s.error(c, "after array element")
	}
	return s.error(c, "")
}

// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *Scanner, c int) int {
	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
		// Complain about non-space byte on next call.
		s.error(c, "after top-level value")
	}
	return ScanEnd
}
+
// stateInString is the state after reading `"`.
func stateInString(s *Scanner, c int) int {
	if c == '"' {
		s.Step = stateEndValue
		return ScanContinue
	}
	if c == '\\' {
		s.Step = stateInStringEsc
		return ScanContinue
	}
	if c < 0x20 {
		// Raw control characters are not permitted inside strings.
		return s.error(c, "in string literal")
	}
	return ScanContinue
}

// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *Scanner, c int) int {
	switch c {
	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
		s.Step = stateInString
		return ScanContinue
	}
	if c == 'u' {
		s.Step = stateInStringEscU
		return ScanContinue
	}
	return s.error(c, "in string escape code")
}

// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *Scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.Step = stateInStringEscU1
		return ScanContinue
	}
	// Not a hex digit.
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *Scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.Step = stateInStringEscU12
		return ScanContinue
	}
	// Not a hex digit.
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *Scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.Step = stateInStringEscU123
		return ScanContinue
	}
	// Not a hex digit.
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *Scanner, c int) int {
	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
		s.Step = stateInString
		return ScanContinue
	}
	// Not a hex digit.
	return s.error(c, "in \\u hexadecimal character escape")
}

// stateNeg is the state after reading `-` during a number.
func stateNeg(s *Scanner, c int) int {
	if c == '0' {
		s.Step = state0
		return ScanContinue
	}
	if '1' <= c && c <= '9' {
		s.Step = state1
		return ScanContinue
	}
	return s.error(c, "in numeric literal")
}

// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *Scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.Step = state1
		return ScanContinue
	}
	// Anything else is handled the same as after `0`.
	return state0(s, c)
}

// state0 is the state after reading `0` during a number.
func state0(s *Scanner, c int) int {
	if c == '.' {
		s.Step = stateDot
		return ScanContinue
	}
	if c == 'e' || c == 'E' {
		s.Step = stateE
		return ScanContinue
	}
	return stateEndValue(s, c)
}

// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *Scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.Step = stateDot0
		return ScanContinue
	}
	return s.error(c, "after decimal point in numeric literal")
}

// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *Scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.Step = stateDot0
		return ScanContinue
	}
	if c == 'e' || c == 'E' {
		s.Step = stateE
		return ScanContinue
	}
	return stateEndValue(s, c)
}

// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *Scanner, c int) int {
	if c == '+' {
		s.Step = stateESign
		return ScanContinue
	}
	if c == '-' {
		s.Step = stateESign
		return ScanContinue
	}
	// No explicit sign: fall through as if a sign had been read.
	return stateESign(s, c)
}

// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *Scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.Step = stateE0
		return ScanContinue
	}
	return s.error(c, "in exponent of numeric literal")
}

// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *Scanner, c int) int {
	if '0' <= c && c <= '9' {
		s.Step = stateE0
		return ScanContinue
	}
	return stateEndValue(s, c)
}
+
// The following states spell out the literals true, false and null
// one byte at a time; any deviation is a syntax error.

// stateT is the state after reading `t`.
func stateT(s *Scanner, c int) int {
	if c == 'r' {
		s.Step = stateTr
		return ScanContinue
	}
	return s.error(c, "in literal true (expecting 'r')")
}

// stateTr is the state after reading `tr`.
func stateTr(s *Scanner, c int) int {
	if c == 'u' {
		s.Step = stateTru
		return ScanContinue
	}
	return s.error(c, "in literal true (expecting 'u')")
}

// stateTru is the state after reading `tru`.
func stateTru(s *Scanner, c int) int {
	if c == 'e' {
		s.Step = stateEndValue
		return ScanContinue
	}
	return s.error(c, "in literal true (expecting 'e')")
}

// stateF is the state after reading `f`.
func stateF(s *Scanner, c int) int {
	if c == 'a' {
		s.Step = stateFa
		return ScanContinue
	}
	return s.error(c, "in literal false (expecting 'a')")
}

// stateFa is the state after reading `fa`.
func stateFa(s *Scanner, c int) int {
	if c == 'l' {
		s.Step = stateFal
		return ScanContinue
	}
	return s.error(c, "in literal false (expecting 'l')")
}

// stateFal is the state after reading `fal`.
func stateFal(s *Scanner, c int) int {
	if c == 's' {
		s.Step = stateFals
		return ScanContinue
	}
	return s.error(c, "in literal false (expecting 's')")
}

// stateFals is the state after reading `fals`.
func stateFals(s *Scanner, c int) int {
	if c == 'e' {
		s.Step = stateEndValue
		return ScanContinue
	}
	return s.error(c, "in literal false (expecting 'e')")
}

// stateN is the state after reading `n`.
func stateN(s *Scanner, c int) int {
	if c == 'u' {
		s.Step = stateNu
		return ScanContinue
	}
	return s.error(c, "in literal null (expecting 'u')")
}

// stateNu is the state after reading `nu`.
func stateNu(s *Scanner, c int) int {
	if c == 'l' {
		s.Step = stateNul
		return ScanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateNul is the state after reading `nul`.
func stateNul(s *Scanner, c int) int {
	if c == 'l' {
		s.Step = stateEndValue
		return ScanContinue
	}
	return s.error(c, "in literal null (expecting 'l')")
}

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`. It is absorbing:
// every subsequent byte also yields ScanError.
func stateError(s *Scanner, c int) int {
	return ScanError
}

// error records an error and switches to the error state.
func (s *Scanner) error(c int, context string) int {
	s.Step = stateError
	s.Err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
	return ScanError
}
+
// quoteChar formats c as a single-quoted character literal for use in
// error messages.
func quoteChar(c int) string {
	// special cases - different from quoted strings
	switch c {
	case '\'':
		return `'\''`
	case '"':
		return `'"'`
	}

	// Reuse strconv's string quoting, swapping in single quotes.
	q := strconv.Quote(string(rune(c)))
	return "'" + q[1:len(q)-1] + "'"
}
+
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
// Calling undo twice without an intervening Step panics.
func (s *Scanner) undo(scanCode int) {
	if s.redo {
		panic("json: invalid use of scanner")
	}
	s.redoCode = scanCode
	s.redoState = s.Step
	s.Step = stateRedo
	s.redo = true
}

// stateRedo helps implement the scanner's 1-byte undo: it replays the
// saved opcode once and restores the saved state function.
func stateRedo(s *Scanner, c int) int {
	s.redo = false
	s.Step = s.redoState
	return s.redoCode
}

+ 200 - 0
vendor/github.com/dustin/gojson/stream.go

@@ -0,0 +1,200 @@
+// Copyright 2010 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
	r    io.Reader   // source of JSON text
	buf  []byte      // unconsumed input read from r
	d    decodeState // reusable decode machinery
	scan Scanner     // reusable scanner for framing values
	err  error       // sticky read/scan error; decode errors are not sticky
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: r}
}

// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }

// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
	if dec.err != nil {
		return dec.err
	}

	n, err := dec.readValue()
	if err != nil {
		return err
	}

	// Don't save err from unmarshal into dec.err:
	// the connection is still usable since we read a complete JSON
	// object from it before the error happened.
	dec.d.init(dec.buf[0:n])
	err = dec.d.unmarshal(v)

	// Slide rest of data down.
	rest := copy(dec.buf, dec.buf[n:])
	dec.buf = dec.buf[0:rest]

	return err
}

// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
	return bytes.NewReader(dec.buf)
}
+
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding, reading more from dec.r as
// needed and growing dec.buf until a whole value has been scanned.
func (dec *Decoder) readValue() (int, error) {
	dec.scan.Reset()

	scanp := 0
	var err error
Input:
	for {
		// Look in the buffer for a new value.
		for i, c := range dec.buf[scanp:] {
			dec.scan.bytes++
			v := dec.scan.Step(&dec.scan, int(c))
			if v == ScanEnd {
				scanp += i
				break Input
			}
			// scanEnd is delayed one byte.
			// We might block trying to get that byte from src,
			// so instead invent a space byte.
			if (v == ScanEndObject || v == ScanEndArray) && dec.scan.Step(&dec.scan, ' ') == ScanEnd {
				scanp += i + 1
				break Input
			}
			if v == ScanError {
				dec.err = dec.scan.Err
				return 0, dec.scan.Err
			}
		}
		scanp = len(dec.buf)

		// Did the last read have an error?
		// Delayed until now to allow buffer scan.
		if err != nil {
			if err == io.EOF {
				// EOF can still complete a trailing bare value
				// (e.g. a number) via the synthetic space.
				if dec.scan.Step(&dec.scan, ' ') == ScanEnd {
					break Input
				}
				if nonSpace(dec.buf) {
					err = io.ErrUnexpectedEOF
				}
			}
			dec.err = err
			return 0, err
		}

		// Make room to read more into the buffer.
		const minRead = 512
		if cap(dec.buf)-len(dec.buf) < minRead {
			newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
			copy(newBuf, dec.buf)
			dec.buf = newBuf
		}

		// Read.  Delay error for next iteration (after scan).
		var n int
		n, err = dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
		dec.buf = dec.buf[0 : len(dec.buf)+n]
	}
	return scanp, nil
}
+
// nonSpace reports whether b contains any byte that is not JSON
// whitespace.
func nonSpace(b []byte) bool {
	for _, c := range b {
		switch c {
		case ' ', '\t', '\r', '\n':
			// JSON whitespace; keep looking.
		default:
			return true
		}
	}
	return false
}
+
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
	w   io.Writer // destination for encoded output
	err error     // sticky write error; once set, Encode always fails
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}

// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
	if enc.err != nil {
		return enc.err
	}
	// newEncodeState/encodeStatePool are the pooled encoder scratch
	// buffers declared elsewhere in this package.
	e := newEncodeState()
	err := e.marshal(v)
	if err != nil {
		return err
	}

	// Terminate each value with a newline.
	// This makes the output look a little nicer
	// when debugging, and some kind of space
	// is required if the encoded value was a number,
	// so that the reader knows there aren't more
	// digits coming.
	e.WriteByte('\n')

	if _, err = enc.w.Write(e.Bytes()); err != nil {
		enc.err = err
	}
	encodeStatePool.Put(e)
	return err
}
+
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte

// MarshalJSON returns *m as the JSON encoding of m.
// A nil receiver or nil message encodes as the JSON literal null;
// the original returned a nil slice (invalid JSON) for an empty
// message and dereferenced a nil receiver. This matches the fix
// adopted by the standard library (golang.org/issue/14493).
func (m *RawMessage) MarshalJSON() ([]byte, error) {
	if m == nil || *m == nil {
		return []byte("null"), nil
	}
	return *m, nil
}

// UnmarshalJSON sets *m to a copy of data.
// It returns an error if called on a nil *RawMessage.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
	if m == nil {
		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
	}
	*m = append((*m)[0:0], data...)
	return nil
}
+
// Compile-time checks that *RawMessage implements Marshaler and Unmarshaler.
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)

+ 44 - 0
vendor/github.com/dustin/gojson/tags.go

@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strings"
+)
+
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and its
// trailing comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	parts := strings.SplitN(tag, ",", 2)
	if len(parts) == 2 {
		return parts[0], tagOptions(parts[1])
	}
	return tag, tagOptions("")
}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}

+ 707 - 0
vendor/github.com/garyburd/go-oauth/oauth/oauth.go

@@ -0,0 +1,707 @@
+// Copyright 2010 Gary Burd
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package oauth is a consumer interface for OAuth 1.0, OAuth 1.0a and RFC 5849.
+//
+// Redirection-based Authorization
+//
+// This section outlines how to use the oauth package in redirection-based
+// authorization (http://tools.ietf.org/html/rfc5849#section-2).
+//
+// Step 1: Create a Client using credentials and URIs provided by the server.
+// The Client can be initialized once at application startup and stored in a
+// package-level variable.
+//
+// Step 2: Request temporary credentials using the Client
+// RequestTemporaryCredentials method. The callbackURL parameter is the URL of
+// the callback handler in step 4. Save the returned credential secret so that
+// it can be later found using credential token as a key. The secret can be
+// stored in a database keyed by the token. Another option is to store the
+// token and secret in session storage or a cookie.
+//
+// Step 3: Redirect the user to URL returned from AuthorizationURL method. The
+// AuthorizationURL method uses the temporary credentials from step 2 and other
+// parameters as specified by the server.
+//
+// Step 4: The server redirects back to the callback URL specified in step 2
+// with the temporary token and a verifier. Use the temporary token to find the
+// temporary secret saved in step 2. Using the temporary token, temporary
+// secret and verifier, request token credentials using the client RequestToken
+// method. Save the returned credentials for later use in the application.
+//
+// Signing Requests
+//
+// The Client type has two low-level methods for signing requests, SignForm and
+// SetAuthorizationHeader.
+//
+// The SignForm method adds an OAuth signature to a form. The application makes
+// an authenticated request by encoding the modified form to the query string
+// or request body.
+//
+// The SetAuthorizationHeader method adds an OAuth signature to a request
+// header. The SetAuthorizationHeader method is the only way to correctly sign
+// a request if the application sets the URL Opaque field when making a
+// request.
+//
+// The Get, Put, Post and Delete methods sign and invoke a request using the
+// supplied net/http Client. These methods are easy to use, but not as flexible
+// as constructing a request using one of the low-level methods.
+//
+// Context With HTTP Client
+//
+// A context-enabled method can include a custom HTTP client in the
+// context and execute an HTTP request using the included HTTP client.
+//
+//     hc := &http.Client{Timeout: 2 * time.Second}
+//     ctx := context.WithValue(context.Background(), oauth.HTTPClient, hc)
+//     c := oauth.Client{ /* Any settings */ }
+//     resp, err := c.GetContext(ctx, &oauth.Credentials{}, rawurl, nil)
+package oauth // import "github.com/garyburd/go-oauth/oauth"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"encoding/base64"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+// noEscape[b] is true if b should not be escaped per section 3.6 of the RFC.
+var noEscape = [256]bool{
+	'A': true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true,
+	'a': true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true, true,
+	'0': true, true, true, true, true, true, true, true, true, true,
+	'-': true,
+	'.': true,
+	'_': true,
+	'~': true,
+}
+
+// encode encodes string per section 3.6 of the RFC. If double is true, then
+// the encoding is applied twice.
+func encode(s string, double bool) []byte {
+	// Compute size of result.
+	m := 3
+	if double {
+		m = 5
+	}
+	n := 0
+	for i := 0; i < len(s); i++ {
+		if noEscape[s[i]] {
+			n++
+		} else {
+			n += m
+		}
+	}
+
+	p := make([]byte, n)
+
+	// Encode it.
+	j := 0
+	for i := 0; i < len(s); i++ {
+		b := s[i]
+		if noEscape[b] {
+			p[j] = b
+			j++
+		} else if double {
+			p[j] = '%'
+			p[j+1] = '2'
+			p[j+2] = '5'
+			p[j+3] = "0123456789ABCDEF"[b>>4]
+			p[j+4] = "0123456789ABCDEF"[b&15]
+			j += 5
+		} else {
+			p[j] = '%'
+			p[j+1] = "0123456789ABCDEF"[b>>4]
+			p[j+2] = "0123456789ABCDEF"[b&15]
+			j += 3
+		}
+	}
+	return p
+}
+
+type keyValue struct{ key, value []byte }
+
+type byKeyValue []keyValue
+
+func (p byKeyValue) Len() int      { return len(p) }
+func (p byKeyValue) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p byKeyValue) Less(i, j int) bool {
+	sgn := bytes.Compare(p[i].key, p[j].key)
+	if sgn == 0 {
+		sgn = bytes.Compare(p[i].value, p[j].value)
+	}
+	return sgn < 0
+}
+
+func (p byKeyValue) appendValues(values url.Values) byKeyValue {
+	for k, vs := range values {
+		k := encode(k, true)
+		for _, v := range vs {
+			v := encode(v, true)
+			p = append(p, keyValue{k, v})
+		}
+	}
+	return p
+}
+
+// writeBaseString writes method, url, and params to w using the OAuth signature
+// base string computation described in section 3.4.1 of the RFC.
+func writeBaseString(w io.Writer, method string, u *url.URL, form url.Values, oauthParams map[string]string) {
+	// Method
+	w.Write(encode(strings.ToUpper(method), false))
+	w.Write([]byte{'&'})
+
+	// URL
+	scheme := strings.ToLower(u.Scheme)
+	host := strings.ToLower(u.Host)
+
+	uNoQuery := *u
+	uNoQuery.RawQuery = ""
+	path := uNoQuery.RequestURI()
+
+	switch {
+	case scheme == "http" && strings.HasSuffix(host, ":80"):
+		host = host[:len(host)-len(":80")]
+	case scheme == "https" && strings.HasSuffix(host, ":443"):
+		host = host[:len(host)-len(":443")]
+	}
+
+	w.Write(encode(scheme, false))
+	w.Write(encode("://", false))
+	w.Write(encode(host, false))
+	w.Write(encode(path, false))
+	w.Write([]byte{'&'})
+
+	// Create sorted slice of encoded parameters. Parameter keys and values are
+	// double encoded in a single step. This is safe because double encoding
+	// does not change the sort order.
+	queryParams := u.Query()
+	p := make(byKeyValue, 0, len(form)+len(queryParams)+len(oauthParams))
+	p = p.appendValues(form)
+	p = p.appendValues(queryParams)
+	for k, v := range oauthParams {
+		p = append(p, keyValue{encode(k, true), encode(v, true)})
+	}
+	sort.Sort(p)
+
+	// Write the parameters.
+	encodedAmp := encode("&", false)
+	encodedEqual := encode("=", false)
+	sep := false
+	for _, kv := range p {
+		if sep {
+			w.Write(encodedAmp)
+		} else {
+			sep = true
+		}
+		w.Write(kv.key)
+		w.Write(encodedEqual)
+		w.Write(kv.value)
+	}
+}
+
+var nonceCounter uint64
+
+func init() {
+	if err := binary.Read(rand.Reader, binary.BigEndian, &nonceCounter); err != nil {
+		// fallback to time if rand reader is broken
+		nonceCounter = uint64(time.Now().UnixNano())
+	}
+}
+
+// nonce returns a unique string.
+func nonce() string {
+	return strconv.FormatUint(atomic.AddUint64(&nonceCounter, 1), 16)
+}
+
+// SignatureMethod identifies a signature method.
+type SignatureMethod int
+
+func (sm SignatureMethod) String() string {
+	switch sm {
+	case RSASHA1:
+		return "RSA-SHA1"
+	case HMACSHA1:
+		return "HMAC-SHA1"
+	case PLAINTEXT:
+		return "PLAINTEXT"
+	default:
+		return "unknown"
+	}
+}
+
+const (
+	HMACSHA1  SignatureMethod = iota // HMAC-SHA1
+	RSASHA1                          // RSA-SHA1
+	PLAINTEXT                        // Plain text
+)
+
+// Credentials represents client, temporary and token credentials.
+type Credentials struct {
+	Token  string // Also known as consumer key or access token.
+	Secret string // Also known as consumer secret or access token secret.
+}
+
+// Client represents an OAuth client.
+type Client struct {
+	// Credentials specifies the client key and secret.
+	// Also known as the consumer key and secret
+	Credentials Credentials
+
+	// TemporaryCredentialRequestURI is the endpoint used by the client to
+	// obtain a set of temporary credentials. Also known as the request token
+	// URL.
+	TemporaryCredentialRequestURI string
+
+	// ResourceOwnerAuthorizationURI is the endpoint to which the resource
+	// owner is redirected to grant authorization. Also known as authorization
+	// URL.
+	ResourceOwnerAuthorizationURI string
+
+	// TokenRequestURI is the endpoint used by the client to request a set of
+	// token credentials using a set of temporary credentials. Also known as
+	// access token URL.
+	TokenRequestURI string
+
+	// RenewCredentialRequestURI is the endpoint the client uses to
+	// request a new set of token credentials using the old set of credentials.
+	RenewCredentialRequestURI string
+
+	// TemporaryCredentialsMethod is the HTTP method used by the client to
+	// obtain a set of temporary credentials. If this field is the empty
+	// string, then POST is used.
+	TemporaryCredentialsMethod string
+
+	// TokenCredentailsMethod is the HTTP method used by the client to request
+	// a set of token credentials. If this field is the empty string, then POST
+	// is used.
+	TokenCredentailsMethod string
+
+	// Header specifies optional extra headers for requests.
+	Header http.Header
+
+	// SignatureMethod specifies the method for signing a request.
+	SignatureMethod SignatureMethod
+
+	// PrivateKey is the private key to use for RSA-SHA1 signatures. This field
+	// must be set for RSA-SHA1 signatures and ignored for other signature
+	// methods.
+	PrivateKey *rsa.PrivateKey
+}
+
+type request struct {
+	credentials   *Credentials
+	method        string
+	u             *url.URL
+	form          url.Values
+	verifier      string
+	sessionHandle string
+	callbackURL   string
+}
+
+var testHook = func(map[string]string) {}
+
+// oauthParams returns the OAuth request parameters for the given credentials,
+// method, URL and application params. See
+// http://tools.ietf.org/html/rfc5849#section-3.4 for more information about
+// signatures.
+func (c *Client) oauthParams(r *request) (map[string]string, error) {
+	oauthParams := map[string]string{
+		"oauth_consumer_key":     c.Credentials.Token,
+		"oauth_signature_method": c.SignatureMethod.String(),
+		"oauth_version":          "1.0",
+	}
+
+	if c.SignatureMethod != PLAINTEXT {
+		oauthParams["oauth_timestamp"] = strconv.FormatInt(time.Now().Unix(), 10)
+		oauthParams["oauth_nonce"] = nonce()
+	}
+
+	if r.credentials != nil {
+		oauthParams["oauth_token"] = r.credentials.Token
+	}
+
+	if r.verifier != "" {
+		oauthParams["oauth_verifier"] = r.verifier
+	}
+
+	if r.sessionHandle != "" {
+		oauthParams["oauth_session_handle"] = r.sessionHandle
+	}
+
+	if r.callbackURL != "" {
+		oauthParams["oauth_callback"] = r.callbackURL
+	}
+
+	testHook(oauthParams)
+
+	var signature string
+
+	switch c.SignatureMethod {
+	case HMACSHA1:
+		key := encode(c.Credentials.Secret, false)
+		key = append(key, '&')
+		if r.credentials != nil {
+			key = append(key, encode(r.credentials.Secret, false)...)
+		}
+		h := hmac.New(sha1.New, key)
+		writeBaseString(h, r.method, r.u, r.form, oauthParams)
+		signature = base64.StdEncoding.EncodeToString(h.Sum(key[:0]))
+	case RSASHA1:
+		if c.PrivateKey == nil {
+			return nil, errors.New("oauth: private key not set")
+		}
+		h := sha1.New()
+		writeBaseString(h, r.method, r.u, r.form, oauthParams)
+		rawSignature, err := rsa.SignPKCS1v15(rand.Reader, c.PrivateKey, crypto.SHA1, h.Sum(nil))
+		if err != nil {
+			return nil, err
+		}
+		signature = base64.StdEncoding.EncodeToString(rawSignature)
+	case PLAINTEXT:
+		rawSignature := encode(c.Credentials.Secret, false)
+		rawSignature = append(rawSignature, '&')
+		if r.credentials != nil {
+			rawSignature = append(rawSignature, encode(r.credentials.Secret, false)...)
+		}
+		signature = string(rawSignature)
+	default:
+		return nil, errors.New("oauth: unknown signature method")
+	}
+
+	oauthParams["oauth_signature"] = signature
+	return oauthParams, nil
+}
+
+// SignForm adds an OAuth signature to form. The urlStr argument must not
+// include a query string.
+//
+// See http://tools.ietf.org/html/rfc5849#section-3.5.2 for
+// information about transmitting OAuth parameters in a request body and
+// http://tools.ietf.org/html/rfc5849#section-3.5.3 for information about
+// transmitting OAuth parameters in a query string.
+func (c *Client) SignForm(credentials *Credentials, method, urlStr string, form url.Values) error {
+	u, err := url.Parse(urlStr)
+	switch {
+	case err != nil:
+		return err
+	case u.RawQuery != "":
+		return errors.New("oauth: urlStr argument to SignForm must not include a query string")
+	}
+	p, err := c.oauthParams(&request{credentials: credentials, method: method, u: u, form: form})
+	if err != nil {
+		return err
+	}
+	for k, v := range p {
+		form.Set(k, v)
+	}
+	return nil
+}
+
+// SignParam is deprecated. Use SignForm instead.
+func (c *Client) SignParam(credentials *Credentials, method, urlStr string, params url.Values) {
+	u, _ := url.Parse(urlStr)
+	u.RawQuery = ""
+	p, _ := c.oauthParams(&request{credentials: credentials, method: method, u: u, form: params})
+	for k, v := range p {
+		params.Set(k, v)
+	}
+}
+
+var oauthKeys = []string{
+	"oauth_consumer_key",
+	"oauth_nonce",
+	"oauth_signature",
+	"oauth_signature_method",
+	"oauth_timestamp",
+	"oauth_token",
+	"oauth_version",
+	"oauth_callback",
+	"oauth_verifier",
+	"oauth_session_handle",
+}
+
+func (c *Client) authorizationHeader(r *request) (string, error) {
+	p, err := c.oauthParams(r)
+	if err != nil {
+		return "", err
+	}
+	var h []byte
+	// Append parameters in a fixed order to support testing.
+	for _, k := range oauthKeys {
+		if v, ok := p[k]; ok {
+			if h == nil {
+				h = []byte(`OAuth `)
+			} else {
+				h = append(h, ", "...)
+			}
+			h = append(h, k...)
+			h = append(h, `="`...)
+			h = append(h, encode(v, false)...)
+			h = append(h, '"')
+		}
+	}
+	return string(h), nil
+}
+
+// AuthorizationHeader returns the HTTP authorization header value for given
+// method, URL and parameters.
+//
+// AuthorizationHeader is deprecated. Use SetAuthorizationHeader instead.
+func (c *Client) AuthorizationHeader(credentials *Credentials, method string, u *url.URL, params url.Values) string {
+	// Signing a request can return an error. This method is deprecated because
+	// this method does not return an error.
+	v, _ := c.authorizationHeader(&request{credentials: credentials, method: method, u: u, form: params})
+	return v
+}
+
+// SetAuthorizationHeader adds an OAuth signature to a request header.
+//
+// See http://tools.ietf.org/html/rfc5849#section-3.5.1 for information about
+// transmitting OAuth parameters in an HTTP request header.
+func (c *Client) SetAuthorizationHeader(header http.Header, credentials *Credentials, method string, u *url.URL, form url.Values) error {
+	v, err := c.authorizationHeader(&request{credentials: credentials, method: method, u: u, form: form})
+	if err != nil {
+		return err
+	}
+	header.Set("Authorization", v)
+	return nil
+}
+
+func (c *Client) do(ctx context.Context, urlStr string, r *request) (*http.Response, error) {
+	var body io.Reader
+	if r.method != http.MethodGet {
+		body = strings.NewReader(r.form.Encode())
+	}
+	req, err := http.NewRequest(r.method, urlStr, body)
+	if err != nil {
+		return nil, err
+	}
+	if req.URL.RawQuery != "" {
+		return nil, errors.New("oauth: url must not contain a query string")
+	}
+	for k, v := range c.Header {
+		req.Header[k] = v
+	}
+	r.u = req.URL
+	auth, err := c.authorizationHeader(r)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Authorization", auth)
+	if r.method == http.MethodGet {
+		req.URL.RawQuery = r.form.Encode()
+	} else {
+		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	}
+	req = requestWithContext(ctx, req)
+	client := contextClient(ctx)
+	return client.Do(req)
+}
+
+// Get issues a GET to the specified URL with form added as a query string.
+func (c *Client) Get(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.GetContext(ctx, credentials, urlStr, form)
+}
+
+// GetContext uses Context to perform Get.
+func (c *Client) GetContext(ctx context.Context, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	return c.do(ctx, urlStr, &request{method: http.MethodGet, credentials: credentials, form: form})
+}
+
+// Post issues a POST with the specified form.
+func (c *Client) Post(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.PostContext(ctx, credentials, urlStr, form)
+}
+
+// PostContext uses Context to perform Post.
+func (c *Client) PostContext(ctx context.Context, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	return c.do(ctx, urlStr, &request{method: http.MethodPost, credentials: credentials, form: form})
+}
+
+// Delete issues a DELETE with the specified form.
+func (c *Client) Delete(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.DeleteContext(ctx, credentials, urlStr, form)
+}
+
+// DeleteContext uses Context to perform Delete.
+func (c *Client) DeleteContext(ctx context.Context, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	return c.do(ctx, urlStr, &request{method: http.MethodDelete, credentials: credentials, form: form})
+}
+
+// Put issues a PUT with the specified form.
+func (c *Client) Put(client *http.Client, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.PutContext(ctx, credentials, urlStr, form)
+}
+
+// PutContext uses Context to perform Put.
+func (c *Client) PutContext(ctx context.Context, credentials *Credentials, urlStr string, form url.Values) (*http.Response, error) {
+	return c.do(ctx, urlStr, &request{method: http.MethodPut, credentials: credentials, form: form})
+}
+
+func (c *Client) requestCredentials(ctx context.Context, u string, r *request) (*Credentials, url.Values, error) {
+	if r.method == "" {
+		r.method = http.MethodPost
+	}
+	resp, err := c.do(ctx, u, r)
+	if err != nil {
+		return nil, nil, err
+	}
+	p, err := ioutil.ReadAll(resp.Body)
+	resp.Body.Close()
+	if err != nil {
+		return nil, nil, RequestCredentialsError{StatusCode: resp.StatusCode, Header: resp.Header,
+			Body: p, msg: err.Error()}
+	}
+	if resp.StatusCode != 200 && resp.StatusCode != 201 {
+		return nil, nil, RequestCredentialsError{StatusCode: resp.StatusCode, Header: resp.Header,
+			Body: p, msg: fmt.Sprintf("OAuth server status %d, %s", resp.StatusCode, string(p))}
+	}
+	m, err := url.ParseQuery(string(p))
+	if err != nil {
+		return nil, nil, RequestCredentialsError{StatusCode: resp.StatusCode, Header: resp.Header,
+			Body: p, msg: err.Error()}
+	}
+	tokens := m["oauth_token"]
+	if len(tokens) == 0 || tokens[0] == "" {
+		return nil, nil, RequestCredentialsError{StatusCode: resp.StatusCode, Header: resp.Header,
+			Body: p, msg: "oauth: token missing from server result"}
+	}
+	secrets := m["oauth_token_secret"]
+	if len(secrets) == 0 { // allow "" as a valid secret.
+		return nil, nil, RequestCredentialsError{StatusCode: resp.StatusCode, Header: resp.Header,
+			Body: p, msg: "oauth: secret missing from server result"}
+	}
+	return &Credentials{Token: tokens[0], Secret: secrets[0]}, m, nil
+}
+
+// RequestTemporaryCredentials requests temporary credentials from the server.
+// See http://tools.ietf.org/html/rfc5849#section-2.1 for information about
+// temporary credentials.
+func (c *Client) RequestTemporaryCredentials(client *http.Client, callbackURL string, additionalParams url.Values) (*Credentials, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.RequestTemporaryCredentialsContext(ctx, callbackURL, additionalParams)
+}
+
+// RequestTemporaryCredentialsContext uses Context to perform RequestTemporaryCredentials.
+func (c *Client) RequestTemporaryCredentialsContext(ctx context.Context, callbackURL string, additionalParams url.Values) (*Credentials, error) {
+	credentials, _, err := c.requestCredentials(ctx, c.TemporaryCredentialRequestURI,
+		&request{method: c.TemporaryCredentialsMethod, form: additionalParams, callbackURL: callbackURL})
+	return credentials, err
+}
+
+// RequestToken requests token credentials from the server. See
+// http://tools.ietf.org/html/rfc5849#section-2.3 for information about token
+// credentials.
+func (c *Client) RequestToken(client *http.Client, temporaryCredentials *Credentials, verifier string) (*Credentials, url.Values, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.RequestTokenContext(ctx, temporaryCredentials, verifier)
+}
+
+// RequestTokenContext uses Context to perform RequestToken.
+func (c *Client) RequestTokenContext(ctx context.Context, temporaryCredentials *Credentials, verifier string) (*Credentials, url.Values, error) {
+	return c.requestCredentials(ctx, c.TokenRequestURI,
+		&request{credentials: temporaryCredentials, method: c.TokenCredentailsMethod, verifier: verifier})
+}
+
+// RenewRequestCredentials requests new token credentials from the server.
+// See http://wiki.oauth.net/w/page/12238549/ScalableOAuth#AccessTokenRenewal
+// for information about access token renewal.
+func (c *Client) RenewRequestCredentials(client *http.Client, credentials *Credentials, sessionHandle string) (*Credentials, url.Values, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.RenewRequestCredentialsContext(ctx, credentials, sessionHandle)
+}
+
+// RenewRequestCredentialsContext uses Context to perform RenewRequestCredentials.
+func (c *Client) RenewRequestCredentialsContext(ctx context.Context, credentials *Credentials, sessionHandle string) (*Credentials, url.Values, error) {
+	return c.requestCredentials(ctx, c.RenewCredentialRequestURI, &request{credentials: credentials, sessionHandle: sessionHandle})
+}
+
+// RequestTokenXAuth requests token credentials from the server using the xAuth protocol.
+// See https://dev.twitter.com/oauth/xauth for information on xAuth.
+func (c *Client) RequestTokenXAuth(client *http.Client, temporaryCredentials *Credentials, user, password string) (*Credentials, url.Values, error) {
+	ctx := context.WithValue(context.Background(), HTTPClient, client)
+	return c.RequestTokenXAuthContext(ctx, temporaryCredentials, user, password)
+}
+
+// RequestTokenXAuthContext uses Context to perform RequestTokenXAuth.
+func (c *Client) RequestTokenXAuthContext(ctx context.Context, temporaryCredentials *Credentials, user, password string) (*Credentials, url.Values, error) {
+	form := make(url.Values)
+	form.Set("x_auth_mode", "client_auth")
+	form.Set("x_auth_username", user)
+	form.Set("x_auth_password", password)
+	return c.requestCredentials(ctx, c.TokenRequestURI,
+		&request{credentials: temporaryCredentials, method: c.TokenCredentailsMethod, form: form})
+}
+
+// AuthorizationURL returns the URL for resource owner authorization. See
+// http://tools.ietf.org/html/rfc5849#section-2.2 for information about
+// resource owner authorization.
+func (c *Client) AuthorizationURL(temporaryCredentials *Credentials, additionalParams url.Values) string {
+	params := make(url.Values)
+	for k, vs := range additionalParams {
+		params[k] = vs
+	}
+	params.Set("oauth_token", temporaryCredentials.Token)
+	return c.ResourceOwnerAuthorizationURI + "?" + params.Encode()
+}
+
+// HTTPClient is the context key to use with context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient contextKey
+
+type contextKey struct{}
+
+func contextClient(ctx context.Context) *http.Client {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok && hc != nil {
+			return hc
+		}
+	}
+	return http.DefaultClient
+}
+
+// RequestCredentialsError is an error containing
+// response information when requesting credentials.
+type RequestCredentialsError struct {
+	StatusCode int
+	Header     http.Header
+	Body       []byte
+	msg        string
+}
+
+func (e RequestCredentialsError) Error() string {
+	return e.msg
+}

+ 13 - 0
vendor/github.com/garyburd/go-oauth/oauth/oauth16.go

@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package oauth
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+func requestWithContext(ctx context.Context, req *http.Request) *http.Request {
+	return req
+}

+ 12 - 0
vendor/github.com/garyburd/go-oauth/oauth/oauth17.go

@@ -0,0 +1,12 @@
+// +build go1.7
+
+package oauth
+
+import (
+	"context"
+	"net/http"
+)
+
+func requestWithContext(ctx context.Context, req *http.Request) *http.Request {
+	return req.WithContext(ctx)
+}

+ 56 - 0
vendor/golang.org/x/net/context/context.go

@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context.  https://golang.org/pkg/context.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).  TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}

+ 72 - 0
vendor/golang.org/x/net/context/go17.go

@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+var (
+	todo       = context.TODO()
+	background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	ctx, f := context.WithCancel(parent)
+	return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	ctx, f := context.WithDeadline(parent, deadline)
+	return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}

+ 20 - 0
vendor/golang.org/x/net/context/go19.go

@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc

+ 300 - 0
vendor/golang.org/x/net/context/pre_go17.go

@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case background:
+		return "context.Background"
+	case todo:
+		return "context.TODO"
+	}
+	return "unknown empty Context"
+}
+
+var (
+	background = new(emptyCtx)
+	todo       = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	c := newCancelCtx(parent)
+	propagateCancel(parent, c)
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+	return &cancelCtx{
+		Context: parent,
+		done:    make(chan struct{}),
+	}
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+	if parent.Done() == nil {
+		return // parent is never canceled
+	}
+	if p, ok := parentCancelCtx(parent); ok {
+		p.mu.Lock()
+		if p.err != nil {
+			// parent has already been canceled
+			child.cancel(false, p.err)
+		} else {
+			if p.children == nil {
+				p.children = make(map[canceler]bool)
+			}
+			p.children[child] = true
+		}
+		p.mu.Unlock()
+	} else {
+		go func() {
+			select {
+			case <-parent.Done():
+				child.cancel(false, parent.Err())
+			case <-child.Done():
+			}
+		}()
+	}
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+	for {
+		switch c := parent.(type) {
+		case *cancelCtx:
+			return c, true
+		case *timerCtx:
+			return c.cancelCtx, true
+		case *valueCtx:
+			parent = c.Context
+		default:
+			return nil, false
+		}
+	}
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+	p, ok := parentCancelCtx(parent)
+	if !ok {
+		return
+	}
+	p.mu.Lock()
+	if p.children != nil {
+		delete(p.children, child)
+	}
+	p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+	cancel(removeFromParent bool, err error)
+	Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+	Context
+
+	done chan struct{} // closed by the first cancel call.
+
+	mu       sync.Mutex
+	children map[canceler]bool // set to nil by the first cancel call
+	err      error             // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+	return c.done
+}
+
+func (c *cancelCtx) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *cancelCtx) String() string {
+	return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+	if err == nil {
+		panic("context: internal error: missing cancel error")
+	}
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return // already canceled
+	}
+	c.err = err
+	close(c.done)
+	for child := range c.children {
+		// NOTE: acquiring the child's lock while holding parent's lock.
+		child.cancel(false, err)
+	}
+	c.children = nil
+	c.mu.Unlock()
+
+	if removeFromParent {
+		removeChild(c.Context, c)
+	}
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+		// The current deadline is already sooner than the new one.
+		return WithCancel(parent)
+	}
+	c := &timerCtx{
+		cancelCtx: newCancelCtx(parent),
+		deadline:  deadline,
+	}
+	propagateCancel(parent, c)
+	d := deadline.Sub(time.Now())
+	if d <= 0 {
+		c.cancel(true, DeadlineExceeded) // deadline has already passed
+		return c, func() { c.cancel(true, Canceled) }
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err == nil {
+		c.timer = time.AfterFunc(d, func() {
+			c.cancel(true, DeadlineExceeded)
+		})
+	}
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+	*cancelCtx
+	timer *time.Timer // Under cancelCtx.mu.
+
+	deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+	return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+	c.cancelCtx.cancel(false, err)
+	if removeFromParent {
+		// Remove this timerCtx from its parent cancelCtx's children.
+		removeChild(c.cancelCtx.Context, c)
+	}
+	c.mu.Lock()
+	if c.timer != nil {
+		c.timer.Stop()
+		c.timer = nil
+	}
+	c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}

+ 109 - 0
vendor/golang.org/x/net/context/pre_go19.go

@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stores using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()